Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ 3a7f009a

History | View | Annotate | Download (125.3 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
/* Record an exception and its error code in the CPU state, then unwind
 * out of the generated code back to the main execution loop.  Does not
 * return.
 */
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

/* Convenience wrapper for exceptions that carry no error code. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
/* Debug trace: log a generic SPR read (value already in env->spr[]). */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Debug trace: log a generic SPR write (value already in env->spr[]). */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}
68

    
69
/* Read the time base, low word. */
target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

/* Read the time base, high word. */
target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

/* Read the alternate time base, low word. */
target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

/* Read the alternate time base, high word. */
target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Read the Processor Utilization of Resources Register (64-bit only). */
target_ulong helper_load_purr (void)
{
    return (target_ulong)cpu_ppc_load_purr(env);
}
#endif

/* Read the PowerPC 601 real-time clock, low word. */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

/* Read the PowerPC 601 real-time clock, high word. */
target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
105

    
106
#if !defined(CONFIG_USER_ONLY)
107
#if defined (TARGET_PPC64)
/* Write the Address Space Register (64-bit hashed MMU). */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

/* Write SDR1 (hashed page table base/mask). */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

/* Write the time base, low word. */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

/* Write the time base, high word. */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

/* Write the alternate time base, low word. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

/* Write the alternate time base, high word. */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* Write the PowerPC 601 real-time clock, low word. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

/* Write the PowerPC 601 real-time clock, high word. */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Read the decrementer. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

/* Write the decrementer. */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
158

    
159
/* Write HID0 on the PowerPC 601.  Bit 0x08 selects the core endianness;
 * when it changes, the MSR_LE hint bit in hflags/hflags_nmsr is rebuilt
 * so translated code picks up the new byte order.
 */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong prev = env->spr[SPR_HID0];

    if ((prev ^ val) & 0x00000008) {
        /* Endianness changed: recompute the LE bit from HID0[0x08]. */
        target_ulong le_bit = ((val >> 3) & 1) << MSR_LE;

        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= le_bit;
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
175

    
176
/* Write a PowerPC 403 protection-bound register; a change invalidates
 * cached translations, so flush the TLB.
 */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

/* Read/write the 40x programmable interval timer. */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

/* Write the 40x debug control register 0. */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

/* Write the 40x storage little-endian register. */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

/* Write the BookE timer control register. */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

/* Write the BookE timer status register. */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

/* Write instruction BAT upper/lower halves. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

/* Write data BAT upper/lower halves. */
void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* Write 601-style unified BATs (the 601 has no split I/D BATs). */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
244
#endif
245

    
246
/*****************************************************************************/
247
/* Memory load and stores */
248

    
249
/* Advance an effective address by "arg", truncating to 32 bits when a
 * 64-bit CPU runs in 32-bit mode (MSR[SF] clear).
 */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
    target_ulong ea = addr + arg;

#if defined(TARGET_PPC64)
    if (!msr_sf) {
        ea = (uint32_t)ea;
    }
#endif
    return ea;
}
258

    
259
/* lmw: load consecutive words from "addr" into GPRs reg..31, byte
 * swapping when the CPU runs little-endian.
 */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t w = ldl(addr);

        env->gpr[r] = msr_le ? bswap32(w) : w;
        addr = addr_add(addr, 4);
    }
}
269

    
270
/* stmw: store GPRs reg..31 as consecutive words at "addr", byte
 * swapping when the CPU runs little-endian.
 */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t w = (uint32_t)env->gpr[r];

        stl(addr, msr_le ? bswap32(w) : w);
        addr = addr_add(addr, 4);
    }
}
280

    
281
/* lswi/lswx backend: load "nb" bytes starting at "addr" into GPRs
 * beginning with "reg" (wrapping 31 -> 0).  A final partial word is
 * left-justified and zero-padded on the right.
 */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    /* Whole words first */
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        /* Remaining 1..3 bytes go into the most-significant end */
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
297
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    /* Byte count comes from XER[25:31]; zero means no-op. */
    if (likely(xer_bc != 0)) {
        /* Raise a program exception if the load range would overwrite
         * rA (when rA != 0) or rB, per the architecture spec above.
         */
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
315

    
316
/* stswi/stswx backend: store "nb" bytes from GPRs starting at "reg"
 * (wrapping 31 -> 0) to memory at "addr"; a final partial word is
 * taken from the most-significant bytes of the register.
 */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    /* Whole words first */
    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    /* Trailing 1..3 bytes, most-significant byte first */
    for (sh = 24; nb > 0; nb--, sh -= 8) {
        stb(addr, (env->gpr[reg] >> sh) & 0xFF);
        addr = addr_add(addr, 1);
    }
}
331

    
332
/* Zero one data cache line at "addr" (aligned down to the line size)
 * and drop any lwarx/ldarx reservation that covered it.
 */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int off;

    addr &= ~(dcache_line_size - 1);
    for (off = 0; off < dcache_line_size; off += 4) {
        stl(addr + off, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}
342

    
343
/* dcbz: zero a cache line of the CPU's configured size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on the 970: HID5[DCBZ_SIZE] field == 1 forces a 32-byte line
 * (legacy behaviour), otherwise use the configured line size.
 */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
355

    
356
/* icbi: invalidate one instruction cache line, discarding any
 * translated code derived from it.
 * NOTE(review): the mask uses dcache_line_size while the invalidate
 * range uses icache_line_size — verify this asymmetry is intended for
 * CPUs where the two differ.
 */
void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
367

    
368
// XXX: to be tested
/* lscbx (POWER/601): load up to XER[bc] bytes into successive GPRs,
 * stopping early when the byte equal to XER[cmp] is seen.  Registers
 * rA (if nonzero) and rB are skipped, never overwritten.  Returns the
 * number of bytes actually loaded (goes back into XER[bc]).
 */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    /* d is the bit offset of the next byte slot, MSB first */
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        /* Match byte terminates the transfer (after being stored) */
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            /* Register full: move to the next one, wrapping 31 -> 0 */
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
392

    
393
/*****************************************************************************/
394
/* Fixed point operations helpers */
395
#if defined(TARGET_PPC64)
396

    
397
/* multiply high word */
/* mulhd: high 64 bits of the signed 128-bit product. */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
/* mulhdu: high 64 bits of the unsigned 128-bit product. */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}
414

    
415
/* mulldo: signed 64x64 multiply returning the low 64 bits, setting
 * XER[OV]/XER[SO] when the signed result does not fit in 64 bits.
 */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
429
#endif
430

    
431
/* cntlzw: count leading zeros of the low 32 bits. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros of all 64 bits. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
442

    
443
/* shift right arithmetic helper */
444
target_ulong helper_sraw (target_ulong value, target_ulong shift)
445
{
446
    int32_t ret;
447

    
448
    if (likely(!(shift & 0x20))) {
449
        if (likely((uint32_t)shift != 0)) {
450
            shift &= 0x1f;
451
            ret = (int32_t)value >> shift;
452
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
453
                env->xer &= ~(1 << XER_CA);
454
            } else {
455
                env->xer |= (1 << XER_CA);
456
            }
457
        } else {
458
            ret = (int32_t)value;
459
            env->xer &= ~(1 << XER_CA);
460
        }
461
    } else {
462
        ret = (int32_t)value >> 31;
463
        if (ret) {
464
            env->xer |= (1 << XER_CA);
465
        } else {
466
            env->xer &= ~(1 << XER_CA);
467
        }
468
    }
469
    return (target_long)ret;
470
}
471

    
472
#if defined(TARGET_PPC64)
/* srad: 64-bit arithmetic right shift; XER[CA] is set iff the operand
 * is negative and a 1 bit was shifted out.  Shift counts of 64..127
 * replicate the sign bit.
 */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* BUG FIX: the carry mask must be built in 64 bits.
             * "1 << shift" is an int shift, which is undefined
             * behaviour for shift >= 32 and produced a wrong CA for
             * shift counts 32..63.
             */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* shift >= 64: sign-replicate; CA set iff operand negative. */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
501

    
502
/* popcntb: population count computed independently within each byte
 * (classic SWAR pairwise reduction, stopped at byte granularity).
 */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
/* 64-bit variant: per-byte population count over the full doubleword. */
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
519

    
520
/*****************************************************************************/
521
/* Floating point operations helpers */
522
/* Widen a raw single-precision bit pattern to double precision using
 * the softfloat status in env (may raise softfloat flags, e.g. on sNaN).
 */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a raw double-precision bit pattern to single precision; may
 * round, overflow or underflow per the current fp_status settings.
 */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
539

    
540
/* True iff the double has a zero biased exponent, i.e. it is
 * denormalized.  Note this is also true for +/-0; callers are expected
 * to test for zero first (see helper_compute_fprf).
 */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
548

    
549
/* Classify a double and return the 5-bit PowerPC FPRF result-class
 * code; when set_fprf is nonzero also store it into FPSCR[FPRF].
 * Only the low 4 bits (FPCC) are returned for Rc=1 updates.
 */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int neg;
    int code;

    farg.ll = arg;
    neg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        /* Signaling NaN: flags are architecturally undefined (0x00);
         * quiet NaN classifies as 0x11.
         */
        code = float64_is_signaling_nan(farg.d) ? 0x00 : 0x11;
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        code = neg ? 0x09 : 0x05;
    } else if (float64_is_zero(farg.d)) {
        /* +/- zero */
        code = neg ? 0x12 : 0x02;
    } else {
        /* Denormalized (0x10) or normalized (0x00) ... */
        code = isden(farg.d) ? 0x10 : 0x00;
        /* ... combined with the sign class bit. */
        code |= neg ? 0x08 : 0x04;
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= code << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return code & 0xF;
}
600

    
601
/* Floating-point invalid operations exception */
/* Record an invalid-operation condition "op" in the FPSCR and, when
 * the VE enable bit is set and FP exceptions are enabled in the MSR,
 * raise a program exception.  Returns the default quiet-NaN result to
 * be written to the target FPR when the exception is disabled (0
 * otherwise).  Statement order matters: target FPR updates must happen
 * before the exception is actually delivered.
 */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred: cleared ve suppresses the raise below */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
682

    
683
/* Record a divide-by-zero condition in the FPSCR and raise a program
 * exception when both the ZE enable bit and an MSR FE bit are set.
 */
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

/* Record an overflow in the FPSCR.  When the OE enable bit is set the
 * exception is only queued (index/error_code) so the caller can write
 * the target FPR first; otherwise inexact (XX/FI) is flagged instead.
 */
static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

/* Record an underflow; like overflow, the enabled exception is queued
 * rather than raised immediately.
 */
static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/* Record an inexact result; the enabled exception is queued rather
 * than raised immediately.
 */
static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
745

    
746
/* Propagate the FPSCR[RN] rounding-mode field into the softfloat
 * status so subsequent operations round as the guest requested.
 */
static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinite */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinite */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
772

    
773
/* mtfsb0: clear one FPSCR bit.  Only a change to the rounding-mode
 * field needs a side effect (resync the softfloat rounding mode).
 */
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
790

    
791
/* mtfsb1: set one FPSCR bit.  Setting an exception status bit while
 * its enable is on, or an enable bit while its status is on, queues a
 * program exception (exception_index/error_code) — the actual raise
 * happens later so the target FPR/CR can be updated first.  Setting
 * the rounding-mode bits resyncs softfloat.
 */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    /* Side effects only trigger on a 0 -> 1 transition */
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* NOTE(review): no break here — when fpscr_ve is clear this
             * falls through into the FPSCR_OX case; verify that is
             * intended.
             */
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any individual VX* bit also sets the VX summary */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect every pending VX* cause into the error code */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
905

    
906
/* mtfsf: write the FPSCR nibbles selected by "mask" from the low 32
 * bits of "arg", then recompute the VX and FEX summary bits and queue
 * a program exception if an enabled exception is now pending.  FEX and
 * VX themselves (bits 0x60000000) are summaries and are never written
 * directly.
 */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    /* FEX and VX are not writable; keep their previous values */
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    /* Copy each 4-bit field whose mask bit is set */
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    /* The rounding mode field may have changed: resync softfloat */
    fpscr_set_rounding_mode();
}
939

    
940
/* Called after an FP operation has written its target FPR: deliver any
 * deferred FP program exception, or (softfloat only) convert the
 * accumulated softfloat flags into FPSCR status/exception updates.
 */
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        /* Only one condition is reported, in priority order */
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
969

    
970
#ifdef CONFIG_SOFTFLOAT
971
void helper_reset_fpstatus (void)
972
{
973
    set_float_exception_flags(0, &env->fp_status);
974
}
975
#endif
976

    
977
/* fadd - fadd. */
/* Double-precision add with PowerPC invalid-operation (VXISI/VXSNAN) checks. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities (inf + -inf) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1000

    
1001
/* fsub - fsub. */
/* Double-precision subtract with PowerPC invalid-operation checks. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities (inf - inf, same signs) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1024

    
1025
/* fmul - fmul. */
/* Double-precision multiply with PowerPC invalid-operation (VXIMZ) checks. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1048

    
1049
/* fdiv - fdiv. */
/* Double-precision divide with PowerPC invalid-operation (VXIDI/VXZDZ) checks. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1074

    
1075
/* fabs */
1076
uint64_t helper_fabs (uint64_t arg)
1077
{
1078
    CPU_DoubleU farg;
1079

    
1080
    farg.ll = arg;
1081
    farg.d = float64_abs(farg.d);
1082
    return farg.ll;
1083
}
1084

    
1085
/* fnabs */
1086
uint64_t helper_fnabs (uint64_t arg)
1087
{
1088
    CPU_DoubleU farg;
1089

    
1090
    farg.ll = arg;
1091
    farg.d = float64_abs(farg.d);
1092
    farg.d = float64_chs(farg.d);
1093
    return farg.ll;
1094
}
1095

    
1096
/* fneg */
1097
uint64_t helper_fneg (uint64_t arg)
1098
{
1099
    CPU_DoubleU farg;
1100

    
1101
    farg.ll = arg;
1102
    farg.d = float64_chs(farg.d);
1103
    return farg.ll;
1104
}
1105

    
1106
/* fctiw - fctiw. */
/* Convert double to 32-bit signed integer, rounding per FPSCR[RN];
 * NaN / infinity inputs raise VXCVI (plus VXSNAN for an sNaN). */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1127

    
1128
/* fctiwz - fctiwz. */
/* Convert double to 32-bit signed integer, rounding toward zero regardless
 * of FPSCR[RN]; NaN / infinity inputs raise VXCVI (plus VXSNAN for sNaN). */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1149

    
1150
#if defined(TARGET_PPC64)
1151
/* fcfid - fcfid. */
/* Convert a signed 64-bit integer to double precision (rounded per FPSCR[RN]). */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}
1158

    
1159
/* fctid - fctid. */
/* Convert double to 64-bit signed integer, rounding per FPSCR[RN];
 * NaN / infinity inputs raise VXCVI (plus VXSNAN for an sNaN). */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1176

    
1177
/* fctidz - fctidz. */
/* Convert double to 64-bit signed integer, rounding toward zero;
 * NaN / infinity inputs raise VXCVI (plus VXSNAN for an sNaN). */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1194

    
1195
#endif
1196

    
1197
/* Common implementation of the frin/friz/frip/frim round-to-integer
 * instructions: round to an integer-valued double using the given softfloat
 * rounding mode, then restore the rounding mode selected by FPSCR[RN].
 * NaN / infinity inputs raise VXCVI (plus VXSNAN for an sNaN). */
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1216

    
1217
/* frin: round to integer, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}
1221

    
1222
/* friz: round to integer, toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}
1226

    
1227
/* frip: round to integer, toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}
1231

    
1232
/* frim: round to integer, toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1236

    
1237
/* fmadd - fmadd. */
/* Fused multiply-add: (arg1 * arg2) + arg3 with a single rounding.
 * With FLOAT128 the intermediate product is kept in quad precision as the
 * architecture specifies; otherwise a native double expression is used. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }

    return farg1.ll;
}
1281

    
1282
/* fmsub - fmsub. */
/* Fused multiply-subtract: (arg1 * arg2) - arg3 with a single rounding;
 * same structure and exception handling as helper_fmadd. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
    return farg1.ll;
}
1325

    
1326
/* fnmadd - fnmadd. */
/* Negated fused multiply-add: -((arg1 * arg2) + arg3); the final negation
 * is skipped for NaN results so the NaN payload propagates unchanged. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1372

    
1373
/* fnmsub - fnmsub. */
/* Negated fused multiply-subtract: -((arg1 * arg2) - arg3); the final
 * negation is skipped for NaN results so the NaN payload propagates. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1419

    
1420
/* frsp - frsp. */
/* Round a double to single precision and widen back to double (the FPR
 * always holds double format); sNaN inputs raise VXSNAN first. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single */
       fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
1436

    
1437
/* fsqrt - fsqrt. */
/* Double-precision square root; negative nonzero inputs raise VXSQRT. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1455

    
1456
/* fre - fre. */
1457
uint64_t helper_fre (uint64_t arg)
1458
{
1459
    CPU_DoubleU farg;
1460
    farg.ll = arg;
1461

    
1462
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1463
        /* sNaN reciprocal */
1464
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1465
    }
1466
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1467
    return farg.d;
1468
}
1469

    
1470
/* fres - fres. */
/* Single-precision reciprocal estimate: 1.0 / arg, rounded through
 * single precision; sNaN inputs raise VXSNAN. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    /* Round through single precision as the instruction requires */
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
1487

    
1488
/* frsqrte  - frsqrte. */
/* Reciprocal square root estimate: 1.0 / sqrt(arg), rounded through single
 * precision; negative nonzero inputs raise VXSQRT, sNaN raises VXSNAN. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        /* Round through single precision as the instruction requires */
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1510

    
1511
/* fsel - fsel. */
1512
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1513
{
1514
    CPU_DoubleU farg1;
1515

    
1516
    farg1.ll = arg1;
1517

    
1518
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
1519
        return arg2;
1520
    } else {
1521
        return arg3;
1522
    }
1523
}
1524

    
1525
/* fcmpu: unordered floating compare.  Writes the 4-bit result
 * (LT=8, GT=4, EQ=2, UN=1) to CR[crfD] and FPSCR[FPRF]; a comparison
 * involving an sNaN additionally raises VXSNAN. */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1553

    
1554
/* fcmpo: ordered floating compare.  Same result encoding as fcmpu, but any
 * NaN operand raises VXVC (invalid compare), with VXSNAN added for sNaNs. */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1587

    
1588
#if !defined (CONFIG_USER_ONLY)
1589
/* mtmsr: store a new MSR value.  hreg_store_msr returns a nonzero exception
 * number when the MSR change requires one (e.g. power management state
 * change); in that case force a TB exit and raise it. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1597

    
1598
/* Common tail of all return-from-interrupt instructions: reload NIP and
 * MSR from the given saved values, masking MSR with msrm.  In 32-bit mode
 * on a 64-bit CPU, keep_msrh preserves the upper 32 MSR bits.  A TB exit
 * is forced so the new MSR context takes effect immediately. */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1626

    
1627
/* rfi: return from interrupt using SRR0/SRR1 (keeps high MSR bits). */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}
1632

    
1633
#if defined(TARGET_PPC64)
1634
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}
1639

    
1640
/* hrfid: return from hypervisor interrupt using HSRR0/HSRR1. */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
1645
#endif
1646
#endif
1647

    
1648
/* tw: trap word.  flags selects the trap conditions (0x10 signed-lt,
 * 0x08 signed-gt, 0x04 eq, 0x02 unsigned-lt, 0x01 unsigned-gt); raise a
 * TRAP program exception when any selected condition holds. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1658

    
1659
#if defined(TARGET_PPC64)
1660
/* td: trap doubleword.  Same condition encoding as helper_tw but on
 * 64-bit operands. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
1669
#endif
1670

    
1671
/*****************************************************************************/
1672
/* PowerPC 601 specific instructions (POWER bridge) */
1673

    
1674
/* clcs (601): return the cache line size selected by arg:
 * 0x0C instruction, 0x0D data, 0x0E minimum, 0x0F maximum; 0 otherwise. */
target_ulong helper_clcs (uint32_t arg)
{
    target_ulong isize = env->icache_line_size;
    target_ulong dsize = env->dcache_line_size;

    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return isize;
    case 0x0DUL:
        /* Data cache line size */
        return dsize;
    case 0x0EUL:
        /* Minimum cache line size */
        return isize < dsize ? isize : dsize;
    case 0x0FUL:
        /* Maximum cache line size */
        return isize > dsize ? isize : dsize;
    default:
        /* Undefined */
        return 0;
    }
}
1701

    
1702
/* div (601/POWER): divide the 64-bit value RA||MQ by RB; quotient is
 * returned, remainder goes to MQ.  Overflow (INT32_MIN / -1) and divide
 * by zero force INT32_MIN with MQ cleared. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): the remainder uses the unmodified (unsigned) arg2
         * while the quotient casts to int32_t — verify against the 601
         * div specification before changing. */
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}
1715

    
1716
/* divo (601/POWER): same as helper_div but also updates XER[OV]/XER[SO]
 * when the quotient does not fit in 32 bits. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Divide error: set overflow and summary overflow */
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            /* Quotient does not fit in 32 bits */
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1736

    
1737
/* divs (601/POWER): signed 32-bit divide; remainder goes to MQ.  Overflow
 * (INT32_MIN / -1) and divide by zero force INT32_MIN with MQ cleared. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 ||
        (dividend == INT32_MIN && divisor == (int32_t)-1)) {
        /* Undefined case: clear MQ, return INT32_MIN */
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1748

    
1749
/* divso (601/POWER): same as helper_divs but also updates XER[OV]/XER[SO]
 * on the error cases. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Divide error: set overflow and summary overflow */
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
1762

    
1763
#if !defined (CONFIG_USER_ONLY)
1764
/* rac (601): real address compute — translate addr through the MMU and
 * return the physical address, or 0 on translation failure.  BATs are
 * temporarily disabled around the lookup (see XXX below). */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}
1781

    
1782
/* rfsvc (601): return from service call using LR/CTR as saved NIP/MSR. */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1786
#endif
1787

    
1788
/*****************************************************************************/
1789
/* 602 specific instructions */
1790
/* mfrom is the most crazy instruction ever seen, imho ! */
1791
/* Real implementation uses a ROM table. Do the same */
1792
/* Extremly decomposed:
1793
 *                      -arg / 256
1794
 * return 256 * log10(10           + 1.0) + 0.5
1795
 */
1796
#if !defined (CONFIG_USER_ONLY)
1797
/* mfrom (602): table lookup of 256*log10(10^(-arg/256) + 1.0) + 0.5,
 * mirroring the ROM table of real hardware; out-of-range args return 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
1806
#endif
1807

    
1808
/*****************************************************************************/
1809
/* Embedded PowerPC specific helpers */
1810

    
1811
/* XXX: to be improved to check access rights when in user-mode */
1812
/* mfdcr: read a Device Control Register.  Raises an invalid-instruction
 * program exception when no DCR environment exists, or a privileged-
 * register exception when the DCR read itself fails. */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1827

    
1828
/* mtdcr: write a Device Control Register; error handling mirrors
 * helper_load_dcr. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1840

    
1841
#if !defined(CONFIG_USER_ONLY)
1842
void helper_40x_rfci (void)
1843
{
1844
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1845
           ~((target_ulong)0xFFFF0000), 0);
1846
}
1847

    
1848
void helper_rfci (void)
1849
{
1850
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1851
           ~((target_ulong)0x3FFF0000), 0);
1852
}
1853

    
1854
void helper_rfdi (void)
1855
{
1856
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1857
           ~((target_ulong)0x3FFF0000), 0);
1858
}
1859

    
1860
void helper_rfmci (void)
1861
{
1862
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1863
           ~((target_ulong)0x3FFF0000), 0);
1864
}
1865
#endif
1866

    
1867
/* 440 specific */
1868
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1869
{
1870
    target_ulong mask;
1871
    int i;
1872

    
1873
    i = 1;
1874
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1875
        if ((high & mask) == 0) {
1876
            if (update_Rc) {
1877
                env->crf[0] = 0x4;
1878
            }
1879
            goto done;
1880
        }
1881
        i++;
1882
    }
1883
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1884
        if ((low & mask) == 0) {
1885
            if (update_Rc) {
1886
                env->crf[0] = 0x8;
1887
            }
1888
            goto done;
1889
        }
1890
        i++;
1891
    }
1892
    if (update_Rc) {
1893
        env->crf[0] = 0x2;
1894
    }
1895
 done:
1896
    env->xer = (env->xer & ~0x7F) | i;
1897
    if (update_Rc) {
1898
        env->crf[0] |= xer_so;
1899
    }
1900
    return i;
1901
}
1902

    
1903
/*****************************************************************************/
/* Altivec extension helpers */
/* Host-endianness-dependent indices of the most/least significant half of
 * a two-element pair as stored in host memory order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector register 'r' in PowerPC
 * (big-endian) element order, regardless of host endianness. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                                \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* Chain DO_HANDLE_NAN for 1..3 operands: the block that follows the
 * macro invocation only runs when none of the operands is a NaN. */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)

/* Saturating arithmetic helpers.  */
/* SATCVT generates cvt<from><to>(): clamp a signed source value into
 * [min, max] and set *sat when clamping occurred. */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* SATCVTU: unsigned source, so only the upper bound needs checking. */
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
/* Signed source to unsigned destination: both bounds are checked. */
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU

/* LVE generates the lvebx/lvehx/lvewx helpers: load a single element
 * from memory into the register slot selected by the address offset,
 * byte-swapping it when the CPU is in little-endian mode (msr_le). */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
/* Identity "swap" for byte-sized accesses. */
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE

/* lvsl: fill r with the byte sequence (sh & 0xf), (sh & 0xf) + 1, ...
 * in PowerPC element order. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}

/* lvsr: like lvsl, but the byte sequence starts at 16 - (sh & 0xf). */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}

#define STVE(name, access, swap, element)                       \
2018
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2019
    {                                                           \
2020
        size_t n_elems = ARRAY_SIZE(r->element);                \
2021
        int adjust = HI_IDX*(n_elems-1);                        \
2022
        int sh = sizeof(r->element[0]) >> 1;                    \
2023
        int index = (addr & 0xf) >> sh;                         \
2024
        if(msr_le) {                                            \
2025
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2026
        } else {                                                        \
2027
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2028
        }                                                               \
2029
    }
2030
#define I(x) (x)
2031
STVE(stvebx, stb, I, u8)
2032
STVE(stvehx, stw, bswap16, u16)
2033
STVE(stvewx, stl, bswap32, u32)
2034
#undef I
2035
#undef LVE
2036

    
2037
/* mtvscr: copy the low word of the source vector (PowerPC element 3)
 * into VSCR, and propagate the non-Java (NJ) bit into the softfloat
 * flush-to-zero setting for subsequent vector FP ops. */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

/* vaddcuw: for each 32-bit element, write 1 when a + b carries out of
 * 32 bits, else 0. */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        /* Unsigned addition wraps, so a carry occurred exactly when the
         * truncated sum is smaller than one of the operands
         * (equivalent to the classic ~a < b test). */
        uint32_t sum = a->u32[i] + b->u32[i];
        r->u32[i] = (sum < a->u32[i]);
    }
}

/* VARITH generates element-wise modulo (wrapping) add/sub helpers
 * (vaddubm, vsububm, ...). */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

/* VARITHFP generates element-wise float add/sub helpers; NaN operands
 * are turned into QNaN results by HANDLE_NAN2 instead of computing. */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP

/* VARITHSAT_* generate saturating add/sub helpers: the operation is
 * performed in a wider type (optype) and then clamped by one of the
 * cvt* helpers above; VSCR[SAT] is set when any element saturated. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED

/* VAVG generates rounding-average helpers: (a + b + 1) >> 1, computed
 * in a wider element type (etype) so the intermediate cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG

/* VCF generates vcfux/vcfsx: convert each integer element to float,
 * then scale the result by 2^-uim. */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF

/* VCMP generates element-wise integer compare helpers; each result
 * element is all-ones on match, all-zeros otherwise.  The "_dot"
 * (record) variants also set CR6: bit 3 = every element matched,
 * bit 1 = no element matched. */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP

/* VCMPFP generates element-wise float compare helpers; unordered
 * comparisons (NaN operands) always produce a zero (false) element. */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
/* "greater or equal" == "not less" (unordered is filtered out above). */
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP

/* vcmpbfp core: for each element set bit 31 when a > b and bit 30 when
 * a < -b (the "bounds" tests); NaN operands set both bits.  When
 * recording, CR6 bit 1 is set iff every element was within bounds. */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

/* vcmpbfp: bounds compare without CR6 update. */
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}

/* vcmpbfp.: bounds compare, also recording the summary into CR6. */
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}

/* VCT generates vctuxs/vctsxs: scale each float element by 2^uim and
 * convert to a saturated (un)signed 32-bit integer using a local
 * round-to-zero float_status copy.  NaN inputs convert to 0; any
 * saturation sets VSCR[SAT]. */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT

/* vmaddfp: element-wise (a * c) + b, via float64 intermediates. */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}

/* vmhaddshs: per signed halfword, c + ((a * b) >> 15), saturated to
 * 16 bits; saturation sets VSCR[SAT]. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vmhraddshs: like vmhaddshs but with rounding — 0x4000 is added to the
 * product before the 15-bit shift. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* VMINMAX generates element-wise integer min/max helpers for every
 * element width and signedness. */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX

/* VMINMAXFP generates float min/max; NaN operands are handled by
 * HANDLE_NAN2 (QNaN result instead of comparing). */
#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP

/* vmladduhm: per halfword, keep the low 16 bits of (a * b) + c. */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        /* All arithmetic promotes to int, so this matches computing the
         * product in an int32_t temporary first. */
        r->s16[i] = (int16_t)(a->s16[i] * b->s16[i] + c->s16[i]);
    }
}

/* VMRG generates the merge-high/merge-low helpers.  A local temporary
 * is filled first so that r may alias a or b.
 * NOTE(review): mrgl* is instantiated with MRGHI and mrgh* with MRGLO;
 * this cross-wiring appears deliberate (host-endian element numbering
 * vs PowerPC numbering) — confirm before "fixing". */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO

/* vmsummbm: per word, c plus the sum of the four signed-byte *
 * unsigned-byte products belonging to that word. */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

/* vmsumshm: per word, c plus the sum of the two signed-halfword
 * products belonging to that word (modulo arithmetic). */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}

/* vmsumshs: like vmsumshm, but the accumulation is done in 64 bits and
 * saturated to signed 32 bits; saturation sets VSCR[SAT]. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vmsumubm: per word, c plus the sum of the four unsigned-byte
 * products belonging to that word. */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

/* vmsumuhm: per word, c plus the sum of the two unsigned-halfword
 * products belonging to that word (modulo arithmetic). */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}

/* vmsumuhs: like vmsumuhm, but the accumulation is done in 64 bits and
 * saturated to unsigned 32 bits; saturation sets VSCR[SAT]. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* VMUL generates the even/odd widening multiply helpers (vmule*,
 * vmulo*): each destination element is the full-width product of the
 * corresponding even- or odd-numbered source elements. */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL

/* vnmsubfp: per-lane negative multiply-subtract, r = -(a*c - b),
 * computed for each 32-bit float lane unless a NaN input short-circuits
 * via HANDLE_NAN3.  */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end, so the fused operation rounds only once
             * as the architecture requires.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2559

    
2560
/* vperm: byte permute.  Each byte of c selects one of the 32 source
 * bytes of the concatenation a:b (bit 0x10 chooses b, low four bits
 * index the chosen register).  A local result is built first so the
 * helper is safe when r aliases a, b or c.  */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;
    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        /* Host byte order is reversed relative to the architecture. */
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
2579

    
2580
/* PKBIG: 1 when host element order already matches the big-endian
 * architectural order used by the pack instructions.  */
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack 32-bit pixels (8:8:8 in the low 24 bits) into 16-bit
 * 1:5:5:5 pixels, taking the top 5 bits of each 8-bit channel.  A local
 * result is used so r may alias a or b.  */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
2605

    
2606
/* vpk*: pack two vectors of `from`-sized elements into one vector of
 * `to`-sized elements.  cvt performs the narrowing (saturating
 * converters set *sat); dosat controls whether VSCR[SAT] is updated.
 * a0/a1 order the inputs so the architectural high half comes first
 * regardless of host endianness.  */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
/* Identity "converter" for the modulo (non-saturating) packs; the sat
 * pointer argument is ignored.  */
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2635

    
2636
/* vrefp: per-lane reciprocal estimate, implemented as an exact
 * 1.0 / b[i] division; NaN inputs are handled by HANDLE_NAN1.  */
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}
2645

    
2646
/* vrfi*: round each float lane to an integral value using the given
 * rounding mode.  A copy of vec_status is used so the temporary
 * rounding-mode change does not leak into later operations.  */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
2663

    
2664
#define VROTATE(suffix, element)                                        \
2665
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2666
    {                                                                   \
2667
        int i;                                                          \
2668
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2669
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2670
            unsigned int shift = b->element[i] & mask;                  \
2671
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2672
        }                                                               \
2673
    }
2674
VROTATE(b, u8)
2675
VROTATE(h, u16)
2676
VROTATE(w, u32)
2677
#undef VROTATE
2678

    
2679
/* vrsqrtefp: per-lane reciprocal-square-root estimate, implemented as
 * the exact 1.0 / sqrt(b[i]); NaN inputs are handled by HANDLE_NAN1.  */
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}
2689

    
2690
/* vsel: bitwise select — each result bit comes from b where the mask c
 * has a one bit and from a where it has a zero bit.  */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int half;

    for (half = 0; half < 2; half++) {
        uint64_t mask = c->u64[half];
        r->u64[half] = (b->u64[half] & mask) | (a->u64[half] & ~mask);
    }
}
2695

    
2696
/* vexptefp: per-lane 2**x estimate, computed exactly via float32_exp2;
 * NaN inputs are handled by HANDLE_NAN1.  */
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);
        }
    }
}

/* vlogefp: per-lane log2(x) estimate, computed exactly via
 * float32_log2; NaN inputs are handled by HANDLE_NAN1.  */
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}
2715

    
2716
/* LEFT/RIGHT give the u64 index of the architectural high/low half for
 * the host's endianness.  */
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        /* Shift count: low 3 bits of the last byte in host order. */   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        /* If the counts differ, r is deliberately left untouched. */   \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                /* carry moves the spilled high bits of the low half. */\
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
2754

    
2755
/* vsl*: per-element logical shift left; the count is taken from the
 * corresponding element of b, masked to the element width.  */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            /* mask == element width in bits, minus one */              \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2769

    
2770
/* vsldoi: shift the 32-byte concatenation a:b left by `shift` bytes and
 * take the high 16 bytes.  The little-endian branch mirrors the byte
 * indexing; a local result makes r-aliases-a/b safe.  */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
2797

    
2798
/* vslo: shift the whole vector left by a byte count taken from bits
 * 121:124 of b (bits 3:6 of its low-order byte), zero-filling.
 * memmove is used because r may alias a.  */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2810

    
2811
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
/* SPLAT_ELEMENT maps the architectural (big-endian) element number to
 * the host's element index.  */
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* vsplt*: replicate the selected element of b into every element of r.
 * s is uint32_t for all element widths; the store truncates as needed. */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2833

    
2834
/* vspltis*: splat a sign-extended 5-bit immediate into every element.
 * The shift-left/shift-right pair on int8_t sign-extends bit 4 of the
 * immediate.  */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2847

    
2848
/* vsr*/vsra*: per-element shift right; the count is masked to the
 * element width.  The vsra* variants use signed element types, relying
 * on >> of a negative signed value being an arithmetic shift
 * (implementation-defined in C, but true on supported compilers --
 * NOTE(review): confirm this assumption holds for all targets).  */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            /* mask == element width in bits, minus one */              \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2865

    
2866
/* vsro: shift the whole vector right by a byte count taken from bits
 * 3:6 of b's low-order byte, zero-filling; mirror image of vslo.
 * memmove is used because r may alias a.  */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2878

    
2879
/* vsubcuw: per 32-bit lane, record the carry-out (borrow complement)
 * of the unsigned subtraction a - b: 1 when no borrow would occur.  */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int lane;

    for (lane = 0; lane < ARRAY_SIZE(r->u32); lane++) {
        r->u32[lane] = (a->u32[lane] >= b->u32[lane]) ? 1 : 0;
    }
}
2886

    
2887
/* vsumsws: sum all four signed words of a plus the last word of b,
 * saturating to 32 bits; the result goes into the last word, the other
 * words are zeroed.  `upper` is the host index of the architectural
 * last word.  */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    /* 64-bit accumulator: five 32-bit addends cannot overflow it. */
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2911

    
2912
/* vsum2sws: for each 64-bit half, sum its two signed words of a plus
 * the odd word of b, saturating to 32 bits; the even word of each half
 * is zeroed.  The inner loop bound ARRAY_SIZE(r->u64) == 2 is the two
 * words per half, not a typo.  */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2937

    
2938
/* vsum4sbs: for each word lane, sum the four signed bytes of a in that
 * lane plus the word of b, saturating to 32 bits.  The inner loop
 * bound ARRAY_SIZE(r->s32) == 4 is the four bytes per word.  */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum4shs: for each word lane, sum the two signed halfwords of a in
 * that lane plus the word of b, saturating to 32 bits.  */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2971

    
2972
/* vsum4ubs: for each word lane, sum the four unsigned bytes of a in
 * that lane plus the word of b, saturating to the unsigned 32-bit
 * range.  */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2989

    
2990
/* UPKHI/UPKLO: whether the "high"/"low" unpack variants read the first
 * half of the source in host element order.  */
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
/* vupk{h,l}px: unpack 1:5:5:5 16-bit pixels into 8:8:8:8 32-bit pixels,
 * sign-replicating the alpha bit to 0x00/0xff.  The locals a/r/g/b
 * intentionally shadow the pointer parameters inside the loop; the
 * output is accumulated in `result` and stored once at the end.  */
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX

/* vupk{h,l}{sb,sh}: sign-extend the high or low half of the packed
 * source elements into double-width elements.  */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO
3039

    
3040
#undef DO_HANDLE_NAN
3041
#undef HANDLE_NAN1
3042
#undef HANDLE_NAN2
3043
#undef HANDLE_NAN3
3044
#undef VECTOR_FOR_INORDER_I
3045
#undef HI_IDX
3046
#undef LO_IDX
3047

    
3048
/*****************************************************************************/
3049
/* SPE extension helpers */
3050
/* Use a table to make this quicker */
3051
/* Nibble bit-reversal lookup table: hbrev[n] is the 4-bit value n with
 * its bits reversed.  Read-only, so declare it const.  */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of an 8-bit value by swapping its two nibbles
 * and bit-reversing each via the lookup table.  */
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}
3060

    
3061
/* Reverse the bit order of a 32-bit value: bit-reverse each byte and
 * place it at the mirrored byte position.  */
static inline uint32_t word_reverse(uint32_t val)
{
    uint32_t result = 0;
    int byte;

    for (byte = 0; byte < 4; byte++) {
        result |= (uint32_t)byte_reverse(val >> (8 * byte)) << (24 - 8 * byte);
    }
    return result;
}
3066

    
3067
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
/* brinc: bit-reversed increment, used for bit-reversed FFT addressing.
 * Increment the bit-reversed low MASKBITS bits of arg1 under the mask
 * bits of arg2, leaving the bits above the mask window unchanged.  */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    /* Reverse, add one (propagating the carry in reversed order),
     * reverse back.  */
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
3078

    
3079
/* Count leading sign bits: for a negative value, count the leading
 * ones (by counting leading zeros of the complement), otherwise count
 * leading zeros.  */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

/* Count leading zeros of a 32-bit value. */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
3091

    
3092
/* Single-precision floating-point conversions */
3093
/* efscfsi: convert signed 32-bit integer to single precision. */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: convert unsigned 32-bit integer to single precision. */
static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efsctsi: convert single precision to signed 32-bit integer,
 * rounding per vec_status.  */
static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctui: convert single precision to unsigned 32-bit integer. */
static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

/* efsctsiz: convert single precision to signed 32-bit integer,
 * truncating toward zero.  */
static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* efsctuiz: convert single precision to unsigned 32-bit integer,
 * truncating toward zero.  */
static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
3158

    
3159
/* efscfsf: convert a signed 32-bit fixed-point fraction to single
 * precision by dividing the integer conversion by 2^32.  */
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: convert an unsigned 32-bit fixed-point fraction to single
 * precision by dividing by 2^32.  */
static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efsctsf: convert single precision to a signed 32-bit fixed-point
 * fraction by multiplying by 2^32 before the integer conversion.  */
static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctuf: convert single precision to an unsigned 32-bit fixed-point
 * fraction by multiplying by 2^32 before the integer conversion.  */
static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
3212

    
3213
/* Expose each single-element SPE conversion inline as a TCG helper. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3238

    
3239
/* Vector (two-element) SPE conversion helpers: apply the scalar
 * conversion independently to the high and low 32-bit halves.  */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3265

    
3266
/* Single-precision floating-point arithmetic */
3267
/* efsadd: single-precision add on raw 32-bit operand encodings. */
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efssub: single-precision subtract (op1 - op2). */
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsmul: single-precision multiply. */
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsdiv: single-precision divide (op1 / op2). */
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
3302

    
3303
/* Expose each scalar SPE arithmetic inline as a TCG helper. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

/* Vector SPE arithmetic helpers: apply the scalar operation
 * independently to the high and low 32-bit halves.  */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3331

    
3332
/* Single-precision floating-point comparisons */
3333
/* efststlt: single-precision less-than test; returns 4 (CR bit value)
 * when op1 < op2, else 0.  */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efststgt: greater-than test via !(op1 <= op2).
 * NOTE(review): float32_le is false for unordered operands, so a NaN
 * input yields "greater than" here — confirm against the SPE spec.  */
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

/* efststeq: equality test; returns 4 when equal, else 0. */
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efscmp*: the full-compare variants currently alias the test
 * variants; special values are not yet distinguished.  */
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststlt(op1, op2);
}

static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststgt(op1, op2);
}

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststeq(op1, op2);
}
3374

    
3375
/* Scalar SPE compare helpers.
 * NOTE(review): the efstst*/efscmp* inlines already return 0 or 4, so
 * the extra `<< 2` makes these helpers return 0 or 16, which does not
 * fit a 4-bit CR field and disagrees with evcmp_merge's apparent 0/1
 * expectation below — confirm intended scaling against translate.c
 * before changing.  */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3392

    
3393
/* Merge the per-half compare results t0 (high half) and t1 (low half)
 * into a 4-bit field: t0, t1, t0|t1 ("any"), t0&t1 ("all").  */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t res;

    res = t0 & t1;
    res |= (t0 | t1) << 1;
    res |= t1 << 2;
    res |= t0 << 3;
    return res;
}
3397

    
3398
/* Build a helper for an SPE vector compare: apply the scalar primitive
 * to the upper and lower 32-bit halves and merge both results into one
 * CR field. */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3415

    
3416
/* Double-precision floating-point conversion */
3417
uint64_t helper_efdcfsi (uint32_t val)
3418
{
3419
    CPU_DoubleU u;
3420

    
3421
    u.d = int32_to_float64(val, &env->vec_status);
3422

    
3423
    return u.ll;
3424
}
3425

    
3426
uint64_t helper_efdcfsid (uint64_t val)
3427
{
3428
    CPU_DoubleU u;
3429

    
3430
    u.d = int64_to_float64(val, &env->vec_status);
3431

    
3432
    return u.ll;
3433
}
3434

    
3435
uint64_t helper_efdcfui (uint32_t val)
3436
{
3437
    CPU_DoubleU u;
3438

    
3439
    u.d = uint32_to_float64(val, &env->vec_status);
3440

    
3441
    return u.ll;
3442
}
3443

    
3444
uint64_t helper_efdcfuid (uint64_t val)
3445
{
3446
    CPU_DoubleU u;
3447

    
3448
    u.d = uint64_to_float64(val, &env->vec_status);
3449

    
3450
    return u.ll;
3451
}
3452

    
3453
uint32_t helper_efdctsi (uint64_t val)
3454
{
3455
    CPU_DoubleU u;
3456

    
3457
    u.ll = val;
3458
    /* NaN are not treated the same way IEEE 754 does */
3459
    if (unlikely(float64_is_any_nan(u.d))) {
3460
        return 0;
3461
    }
3462

    
3463
    return float64_to_int32(u.d, &env->vec_status);
3464
}
3465

    
3466
uint32_t helper_efdctui (uint64_t val)
3467
{
3468
    CPU_DoubleU u;
3469

    
3470
    u.ll = val;
3471
    /* NaN are not treated the same way IEEE 754 does */
3472
    if (unlikely(float64_is_any_nan(u.d))) {
3473
        return 0;
3474
    }
3475

    
3476
    return float64_to_uint32(u.d, &env->vec_status);
3477
}
3478

    
3479
uint32_t helper_efdctsiz (uint64_t val)
3480
{
3481
    CPU_DoubleU u;
3482

    
3483
    u.ll = val;
3484
    /* NaN are not treated the same way IEEE 754 does */
3485
    if (unlikely(float64_is_any_nan(u.d))) {
3486
        return 0;
3487
    }
3488

    
3489
    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3490
}
3491

    
3492
uint64_t helper_efdctsidz (uint64_t val)
3493
{
3494
    CPU_DoubleU u;
3495

    
3496
    u.ll = val;
3497
    /* NaN are not treated the same way IEEE 754 does */
3498
    if (unlikely(float64_is_any_nan(u.d))) {
3499
        return 0;
3500
    }
3501

    
3502
    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3503
}
3504

    
3505
uint32_t helper_efdctuiz (uint64_t val)
3506
{
3507
    CPU_DoubleU u;
3508

    
3509
    u.ll = val;
3510
    /* NaN are not treated the same way IEEE 754 does */
3511
    if (unlikely(float64_is_any_nan(u.d))) {
3512
        return 0;
3513
    }
3514

    
3515
    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3516
}
3517

    
3518
uint64_t helper_efdctuidz (uint64_t val)
3519
{
3520
    CPU_DoubleU u;
3521

    
3522
    u.ll = val;
3523
    /* NaN are not treated the same way IEEE 754 does */
3524
    if (unlikely(float64_is_any_nan(u.d))) {
3525
        return 0;
3526
    }
3527

    
3528
    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3529
}
3530

    
3531
uint64_t helper_efdcfsf (uint32_t val)
3532
{
3533
    CPU_DoubleU u;
3534
    float64 tmp;
3535

    
3536
    u.d = int32_to_float64(val, &env->vec_status);
3537
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3538
    u.d = float64_div(u.d, tmp, &env->vec_status);
3539

    
3540
    return u.ll;
3541
}
3542

    
3543
uint64_t helper_efdcfuf (uint32_t val)
3544
{
3545
    CPU_DoubleU u;
3546
    float64 tmp;
3547

    
3548
    u.d = uint32_to_float64(val, &env->vec_status);
3549
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3550
    u.d = float64_div(u.d, tmp, &env->vec_status);
3551

    
3552
    return u.ll;
3553
}
3554

    
3555
uint32_t helper_efdctsf (uint64_t val)
3556
{
3557
    CPU_DoubleU u;
3558
    float64 tmp;
3559

    
3560
    u.ll = val;
3561
    /* NaN are not treated the same way IEEE 754 does */
3562
    if (unlikely(float64_is_any_nan(u.d))) {
3563
        return 0;
3564
    }
3565
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3566
    u.d = float64_mul(u.d, tmp, &env->vec_status);
3567

    
3568
    return float64_to_int32(u.d, &env->vec_status);
3569
}
3570

    
3571
uint32_t helper_efdctuf (uint64_t val)
3572
{
3573
    CPU_DoubleU u;
3574
    float64 tmp;
3575

    
3576
    u.ll = val;
3577
    /* NaN are not treated the same way IEEE 754 does */
3578
    if (unlikely(float64_is_any_nan(u.d))) {
3579
        return 0;
3580
    }
3581
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3582
    u.d = float64_mul(u.d, tmp, &env->vec_status);
3583

    
3584
    return float64_to_uint32(u.d, &env->vec_status);
3585
}
3586

    
3587
uint32_t helper_efscfd (uint64_t val)
3588
{
3589
    CPU_DoubleU u1;
3590
    CPU_FloatU u2;
3591

    
3592
    u1.ll = val;
3593
    u2.f = float64_to_float32(u1.d, &env->vec_status);
3594

    
3595
    return u2.l;
3596
}
3597

    
3598
uint64_t helper_efdcfs (uint32_t val)
3599
{
3600
    CPU_DoubleU u2;
3601
    CPU_FloatU u1;
3602

    
3603
    u1.l = val;
3604
    u2.d = float32_to_float64(u1.f, &env->vec_status);
3605

    
3606
    return u2.ll;
3607
}
3608

    
3609
/* Double precision fixed-point arithmetic */
3610
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3611
{
3612
    CPU_DoubleU u1, u2;
3613
    u1.ll = op1;
3614
    u2.ll = op2;
3615
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3616
    return u1.ll;
3617
}
3618

    
3619
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3620
{
3621
    CPU_DoubleU u1, u2;
3622
    u1.ll = op1;
3623
    u2.ll = op2;
3624
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3625
    return u1.ll;
3626
}
3627

    
3628
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3629
{
3630
    CPU_DoubleU u1, u2;
3631
    u1.ll = op1;
3632
    u2.ll = op2;
3633
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3634
    return u1.ll;
3635
}
3636

    
3637
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3638
{
3639
    CPU_DoubleU u1, u2;
3640
    u1.ll = op1;
3641
    u2.ll = op2;
3642
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3643
    return u1.ll;
3644
}
3645

    
3646
/* Double precision floating point helpers */
3647
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3648
{
3649
    CPU_DoubleU u1, u2;
3650
    u1.ll = op1;
3651
    u2.ll = op2;
3652
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3653
}
3654

    
3655
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3656
{
3657
    CPU_DoubleU u1, u2;
3658
    u1.ll = op1;
3659
    u2.ll = op2;
3660
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3661
}
3662

    
3663
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3664
{
3665
    CPU_DoubleU u1, u2;
3666
    u1.ll = op1;
3667
    u2.ll = op2;
3668
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3669
}
3670

    
3671
/* efdcmplt: full double-precision "less than" compare; currently the
 * same as the test-only variant. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    uint32_t crbit = helper_efdtstlt(op1, op2);

    return crbit;
}
3676

    
3677
/* efdcmpgt: full double-precision "greater than" compare; currently the
 * same as the test-only variant. */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    uint32_t crbit = helper_efdtstgt(op1, op2);

    return crbit;
}
3682

    
3683
/* efdcmpeq: full double-precision equality compare; currently the same
 * as the test-only variant. */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    uint32_t crbit = helper_efdtsteq(op1, op2);

    return crbit;
}
3688

    
3689
/*****************************************************************************/
3690
/* Softmmu support */
3691
#if !defined (CONFIG_USER_ONLY)
3692

    
3693
#define MMUSUFFIX _mmu
3694

    
3695
#define SHIFT 0
3696
#include "softmmu_template.h"
3697

    
3698
#define SHIFT 1
3699
#include "softmmu_template.h"
3700

    
3701
#define SHIFT 2
3702
#include "softmmu_template.h"
3703

    
3704
#define SHIFT 3
3705
#include "softmmu_template.h"
3706

    
3707
/* try to fill the TLB and return an exception if error. If retaddr is
3708
   NULL, it means that the function was called in C code (i.e. not
3709
   from generated code or from helper.c) */
3710
/* XXX: fix it to restore all registers */
3711
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3712
{
3713
    TranslationBlock *tb;
3714
    CPUState *saved_env;
3715
    unsigned long pc;
3716
    int ret;
3717

    
3718
    /* XXX: hack to restore env in all cases, even if not called from
3719
       generated code */
3720
    saved_env = env;
3721
    env = cpu_single_env;
3722
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3723
    if (unlikely(ret != 0)) {
3724
        if (likely(retaddr)) {
3725
            /* now we have a real cpu fault */
3726
            pc = (unsigned long)retaddr;
3727
            tb = tb_find_pc(pc);
3728
            if (likely(tb)) {
3729
                /* the PC is inside the translated code. It means that we have
3730
                   a virtual CPU fault */
3731
                cpu_restore_state(tb, env, pc, NULL);
3732
            }
3733
        }
3734
        helper_raise_exception_err(env->exception_index, env->error_code);
3735
    }
3736
    env = saved_env;
3737
}
3738

    
3739
/* Segment registers load and store */
3740
target_ulong helper_load_sr (target_ulong sr_num)
3741
{
3742
#if defined(TARGET_PPC64)
3743
    if (env->mmu_model & POWERPC_MMU_64)
3744
        return ppc_load_sr(env, sr_num);
3745
#endif
3746
    return env->sr[sr_num];
3747
}
3748

    
3749
/* Write segment register sr_num through the MMU helper (handles the
 * required TLB maintenance). */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3753

    
3754
/* SLB management */
3755
#if defined(TARGET_PPC64)
3756
/* slbmte: install the SLB entry described by rb/rs; invalid contents
 * raise a program interrupt with the "invalid operation" error code. */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}
3762

    
3763
/* slbmfee: read the ESID half of the SLB entry selected by rb; an
 * invalid selector raises a program interrupt (which does not return). */
target_ulong helper_load_slb_esid (target_ulong rb)
{
    target_ulong esid;

    if (ppc_load_slb_esid(env, rb, &esid) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return esid;
}
3772

    
3773
/* slbmfev: read the VSID half of the SLB entry selected by rb; an
 * invalid selector raises a program interrupt (which does not return). */
target_ulong helper_load_slb_vsid (target_ulong rb)
{
    target_ulong vsid;

    if (ppc_load_slb_vsid(env, rb, &vsid) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return vsid;
}
3782

    
3783
void helper_slbia (void)
3784
{
3785
    ppc_slb_invalidate_all(env);
3786
}
3787

    
3788
/* slbie: invalidate the SLB entry covering 'addr'. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3792

    
3793
#endif /* defined(TARGET_PPC64) */
3794

    
3795
/* TLB management */
3796
void helper_tlbia (void)
3797
{
3798
    ppc_tlb_invalidate_all(env);
3799
}
3800

    
3801
/* tlbie: invalidate the translation for 'addr'. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3805

    
3806
/* Software driven TLBs management */
3807
/* PowerPC 602/603 software TLB load instructions helpers */
3808
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3809
{
3810
    target_ulong RPN, CMP, EPN;
3811
    int way;
3812

    
3813
    RPN = env->spr[SPR_RPA];
3814
    if (is_code) {
3815
        CMP = env->spr[SPR_ICMP];
3816
        EPN = env->spr[SPR_IMISS];
3817
    } else {
3818
        CMP = env->spr[SPR_DCMP];
3819
        EPN = env->spr[SPR_DMISS];
3820
    }
3821
    way = (env->spr[SPR_SRR1] >> 17) & 1;
3822
    (void)EPN; /* avoid a compiler warning */
3823
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3824
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3825
              RPN, way);
3826
    /* Store this TLB */
3827
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3828
                     way, is_code, CMP, RPN);
3829
}
3830

    
3831
/* tlbld: load a data TLB entry on the 602/603. */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}
3835

    
3836
/* tlbli: load an instruction TLB entry on the 602/603. */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3840

    
3841
/* PowerPC 74xx software TLB load instructions helpers */
3842
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3843
{
3844
    target_ulong RPN, CMP, EPN;
3845
    int way;
3846

    
3847
    RPN = env->spr[SPR_PTELO];
3848
    CMP = env->spr[SPR_PTEHI];
3849
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
3850
    way = env->spr[SPR_TLBMISS] & 0x3;
3851
    (void)EPN; /* avoid a compiler warning */
3852
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3853
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3854
              RPN, way);
3855
    /* Store this TLB */
3856
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3857
                     way, is_code, CMP, RPN);
3858
}
3859

    
3860
/* tlbld: load a data TLB entry on the 74xx. */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}
3864

    
3865
/* tlbli: load an instruction TLB entry on the 74xx. */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3869

    
3870
static inline target_ulong booke_tlb_to_page_size(int size)
3871
{
3872
    return 1024 << (2 * size);
3873
}
3874

    
3875
/* Map a page size in bytes back to its BookE TLB size field.
 * Returns -1 when the size is not one of the architected values. */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    switch (page_size) {
    case 0x00000400UL:
        return 0x0;
    case 0x00001000UL:
        return 0x1;
    case 0x00004000UL:
        return 0x2;
    case 0x00010000UL:
        return 0x3;
    case 0x00040000UL:
        return 0x4;
    case 0x00100000UL:
        return 0x5;
    case 0x00400000UL:
        return 0x6;
    case 0x01000000UL:
        return 0x7;
    case 0x04000000UL:
        return 0x8;
    case 0x10000000UL:
        return 0x9;
    case 0x40000000UL:
        return 0xA;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        return 0xB;
    case 0x000400000000ULL:
        return 0xC;
    case 0x001000000000ULL:
        return 0xD;
    case 0x004000000000ULL:
        return 0xE;
    case 0x010000000000ULL:
        return 0xF;
#endif
    default:
        return -1;
    }
}
3937

    
3938
/* Helpers for 4xx TLB management */
3939
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */
3940

    
3941
#define PPC4XX_TLBHI_V              0x00000040
3942
#define PPC4XX_TLBHI_E              0x00000020
3943
#define PPC4XX_TLBHI_SIZE_MIN       0
3944
#define PPC4XX_TLBHI_SIZE_MAX       7
3945
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
3946
#define PPC4XX_TLBHI_SIZE_SHIFT     7
3947
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007
3948

    
3949
#define PPC4XX_TLBLO_EX             0x00000200
3950
#define PPC4XX_TLBLO_WR             0x00000100
3951
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
3952
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
3953

    
3954
/* tlbre (high word): return EPN, valid bit and encoded page size of the
 * selected 4xx TLB entry; the entry's PID is mirrored into SPR 40x_PID. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    /* Fall back to the default encoding for unrepresentable sizes */
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
3974

    
3975
/* tlbre (low word): return RPN plus EX/WR bits derived from the stored
 * protection of the selected 4xx TLB entry. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
3991

    
3992
/* tlbwe (high word): update EPN, page size and valid bit of the selected
 * 4xx TLB entry.  Pages covered by the old and the new mapping are both
 * flushed from the QEMU TLB so no stale translation survives the update. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        /* Cast the page size: TARGET_PAGE_SIZE expands to a plain int
         * expression while the format string expects %u, and use the
         * named size-field macros instead of the magic ">> 7 & 0x7". */
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, (unsigned int)TARGET_PAGE_SIZE,
                  (int)((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                        & PPC4XX_TLBHI_SIZE_MASK));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
4050

    
4051
/* tlbwe (low word): update RPN, attributes and access permissions of the
 * selected 4xx TLB entry.  Read access is always granted; note that the
 * protection word is rebuilt from scratch here, as in the original. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ
        | ((val & PPC4XX_TLBLO_EX) ? PAGE_EXEC : 0)
        | ((val & PPC4XX_TLBLO_WR) ? PAGE_WRITE : 0);
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4076

    
4077
/* tlbsx: search the TLB for 'address' under the current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4081

    
4082
/* PowerPC 440 TLB management */
4083
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
4084
{
4085
    ppcemb_tlb_t *tlb;
4086
    target_ulong EPN, RPN, size;
4087
    int do_flush_tlbs;
4088

    
4089
    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
4090
              __func__, word, (int)entry, value);
4091
    do_flush_tlbs = 0;
4092
    entry &= 0x3F;
4093
    tlb = &env->tlb[entry].tlbe;
4094
    switch (word) {
4095
    default:
4096
        /* Just here to please gcc */
4097
    case 0:
4098
        EPN = value & 0xFFFFFC00;
4099
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4100
            do_flush_tlbs = 1;
4101
        tlb->EPN = EPN;
4102
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
4103
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4104
            do_flush_tlbs = 1;
4105
        tlb->size = size;
4106
        tlb->attr &= ~0x1;
4107
        tlb->attr |= (value >> 8) & 1;
4108
        if (value & 0x200) {
4109
            tlb->prot |= PAGE_VALID;
4110
        } else {
4111
            if (tlb->prot & PAGE_VALID) {
4112
                tlb->prot &= ~PAGE_VALID;
4113
                do_flush_tlbs = 1;
4114
            }
4115
        }
4116
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4117
        if (do_flush_tlbs)
4118
            tlb_flush(env, 1);
4119
        break;
4120
    case 1:
4121
        RPN = value & 0xFFFFFC0F;
4122
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4123
            tlb_flush(env, 1);
4124
        tlb->RPN = RPN;
4125
        break;
4126
    case 2:
4127
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4128
        tlb->prot = tlb->prot & PAGE_VALID;
4129
        if (value & 0x1)
4130
            tlb->prot |= PAGE_READ << 4;
4131
        if (value & 0x2)
4132
            tlb->prot |= PAGE_WRITE << 4;
4133
        if (value & 0x4)
4134
            tlb->prot |= PAGE_EXEC << 4;
4135
        if (value & 0x8)
4136
            tlb->prot |= PAGE_READ;
4137
        if (value & 0x10)
4138
            tlb->prot |= PAGE_WRITE;
4139
        if (value & 0x20)
4140
            tlb->prot |= PAGE_EXEC;
4141
        break;
4142
    }
4143
}
4144

    
4145
/* tlbre: read one word (0..2) of a 440 TLB entry; reading word 0 also
 * copies the entry's PID into the low byte of MMUCR. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}
4190

    
4191
/* tlbsx: search the TLB for 'address' under the PID held in MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4195

    
4196
#endif /* !CONFIG_USER_ONLY */