Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ eaabeef2

History | View | Annotate | Download (127.2 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
/* Record a pending exception in the CPU state and unwind to the main
 * execution loop.  Does not return: cpu_loop_exit() longjmps out.
 */
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

/* Convenience wrapper: raise an exception with a zero error code. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
/* Debug trace: log a read of SPR 'sprn' and its current value in env. */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Debug trace: log a write to SPR 'sprn' (new value already stored in env). */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}
68

    
69
/* Time-base / alternate time-base read wrappers: each forwards to the
 * corresponding cpu_ppc_load_* back-end with the global CPU state 'env'.
 */

target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}
88

    
89
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
90
/* Read the PURR via cpu_ppc_load_purr() (64-bit system emulation only). */
target_ulong helper_load_purr (void)
{
    return (target_ulong)cpu_ppc_load_purr(env);
}
94
#endif
95

    
96
/* PowerPC 601 real-time clock reads, forwarded to the 601 back-ends. */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
105

    
106
#if !defined(CONFIG_USER_ONLY)
107
#if defined (TARGET_PPC64)
108
/* Write the Address Space Register; forwards to ppc_store_asr(). */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
112
#endif
113

    
114
/* SPR store/load wrappers: each forwards to the corresponding back-end
 * routine with the global CPU state 'env'.
 */

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Read the decrementer. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

/* Write the decrementer. */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
158

    
159
/* Write HID0 on the 601.  Bit 3 (0x8) controls little-endian mode; when it
 * changes, the MSR_LE view mirrored into hflags/hflags_nmsr must be
 * resynchronized as well.
 */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        /* Copy bit 3 of the new HID0 value into the MSR_LE position. */
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    /* Only the low 32 bits of HID0 are architected. */
    env->spr[SPR_HID0] = (uint32_t)val;
}
175

    
176
/* Write protection-bound register 'num' on the 403.  A changed bound
 * invalidates cached translations, so the TLB is flushed.
 */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (unlikely(env->pb[num] == value)) {
        /* No change: nothing to do. */
        return;
    }
    env->pb[num] = value;
    /* Should be optimized: a full flush is more than strictly needed. */
    tlb_flush(env, 1);
}
184

    
185
/* 40x / BookE / BAT register wrappers: each forwards to the corresponding
 * back-end routine with the global CPU state 'env'.
 */

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* The 601 has unified BATs: both "601 BAT" helpers use the ibat routines. */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
244
#endif
245

    
246
/*****************************************************************************/
247
/* Memory load and stores */
248

    
249
/* Advance an effective address by 'arg', honoring the addressing mode:
 * in 32-bit mode (MSR[SF] clear on a 64-bit target) effective addresses
 * wrap at 4 GB.
 */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
    target_ulong ea = addr + arg;

#if defined(TARGET_PPC64)
    if (!msr_sf) {
        ea = (uint32_t)ea;
    }
#endif
    return ea;
}
258

    
259
/* lmw: load GPRs reg..31 from consecutive words starting at addr. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    target_ulong ea = addr;
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t word = ldl(ea);

        /* In little-endian mode the loaded word is byte-reversed. */
        env->gpr[r] = msr_le ? bswap32(word) : word;
        ea = addr_add(ea, 4);
    }
}
269

    
270
/* stmw: store GPRs reg..31 to consecutive words starting at addr. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    target_ulong ea = addr;
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t word = (uint32_t)env->gpr[r];

        /* In little-endian mode the stored word is byte-reversed. */
        stl(ea, msr_le ? bswap32(word) : word);
        ea = addr_add(ea, 4);
    }
}
280

    
281
/* lsw: load 'nb' bytes into GPRs starting at 'reg', wrapping at r31->r0.
 * Whole words are loaded first; a 1-3 byte tail is packed into the
 * high-order end of the final register (low bytes zeroed).
 */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            /* NOTE(review): the byte is shifted as an int; confirm the
             * intended widening for shift == 24 on 64-bit targets. */
            env->gpr[reg] |= ldub(addr) << shift;
            addr = addr_add(addr, 1);
        }
    }
}
297
/* PPC32 specification says we must generate an exception if
298
 * rA is in the range of registers to be loaded.
299
 * In an other hand, IBM says this is valid, but rA won't be loaded.
300
 * For now, I'll follow the spec...
301
 */
302
/* lswx: load XER[BC] bytes starting at addr.  Per the PPC32 spec an
 * invalid-form program exception is raised when rA (if nonzero) or rB
 * falls inside the range of registers to be loaded.
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    uint32_t nb = xer_bc;

    if (nb == 0) {
        /* Zero-length transfer: no-op. */
        return;
    }
    if (unlikely((ra != 0 && reg < ra && (reg + nb) > ra) ||
                 (reg < rb && (reg + nb) > rb))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX);
    } else {
        helper_lsw(addr, nb, reg);
    }
}
315

    
316
/* stsw: store 'nb' bytes from GPRs starting at 'reg', wrapping at r31->r0.
 * Whole words first; a 1-3 byte tail is taken from the most-significant
 * end of the last register.
 */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            stb(addr, (env->gpr[reg] >> shift) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
331

    
332
/* Zero one data cache line of 'dcache_line_size' bytes containing 'addr'.
 * A reservation (lwarx/stwcx.) on the zeroed line is cancelled.
 */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int offset;

    /* Align down to the start of the line, then clear it word by word. */
    addr &= ~(dcache_line_size - 1);
    for (offset = 0; offset < dcache_line_size; offset += 4) {
        stl(addr + offset, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}
342

    
343
/* dcbz: zero a data cache line using the CPU's configured line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}
347

    
348
/* dcbz on the 970: when the HID5 field at bits 7-8 equals 1, dcbz uses a
 * fixed 32-byte size; otherwise the configured line size applies.
 */
void helper_dcbz_970(target_ulong addr)
{
    int line_size;

    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        line_size = 32;
    } else {
        line_size = env->dcache_line_size;
    }
    do_dcbz(addr, line_size);
}
355

    
356
/* icbi: instruction cache block invalidate. */
void helper_icbi(target_ulong addr)
{
    /* NOTE(review): the address is aligned using dcache_line_size while the
     * invalidated range below uses icache_line_size — confirm this mismatch
     * is intentional.
     */
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
367

    
368
// XXX: to be tested
369
/* lscbx (POWER/601): load up to XER[BC] bytes into successive GPRs,
 * stopping early when a byte equal to XER[CMP] is loaded.  rA (if nonzero)
 * and rB are never overwritten.  Returns the number of bytes transferred.
 */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            /* BUG FIX: build the byte mask in target_ulong width.
             * "0xFF << 24" and "c << 24" overflow a signed int (undefined
             * behavior) and, once sign-extended to a 64-bit target_ulong,
             * corrupted the upper half of the register.
             */
            env->gpr[reg] = (env->gpr[reg] & ~((target_ulong)0xFF << d))
                          | ((target_ulong)c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        /* Move to the next byte slot, wrapping to the next register. */
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg = (reg + 1) & 0x1F;
        }
    }
    return i;
}
392

    
393
/*****************************************************************************/
394
/* Fixed point operations helpers */
395
#if defined(TARGET_PPC64)
396

    
397
/* multiply high word */
398
/* mulhd: signed 64x64 multiply, returning the high 64 bits of the
 * 128-bit product.
 */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    muls64(&lo, &hi, arg1, arg2);
    return hi;
}

/* mulhdu: unsigned 64x64 multiply, returning the high 64 bits of the
 * 128-bit product.
 */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    mulu64(&lo, &hi, arg1, arg2);
    return hi;
}
414

    
415
/* mulldo: signed 64x64 multiply returning the low 64 bits, setting
 * XER[OV] (and the sticky XER[SO]) when the product does not fit.
 */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t hi;
    uint64_t lo;

    muls64(&lo, (uint64_t *)&hi, arg1, arg2);
    /* No overflow iff the high word is pure sign extension (0 or -1). */
    if (likely((uint64_t)(hi + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)lo;
}
429
#endif
430

    
431
/* cntlzw: count leading zeroes in the low 32 bits of t. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}
435

    
436
#if defined(TARGET_PPC64)
437
/* cntlzd: count leading zeroes in the full 64-bit value. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
441
#endif
442

    
443
/* shift right arithmetic helper */
444
/* sraw: 32-bit shift right algebraic.  XER[CA] is set iff the result is
 * negative and one or more 1-bits were shifted out.
 */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            /* BUG FIX: build the shifted-out mask as unsigned —
             * "1 << 31" overflows a signed int (undefined behavior). */
            if (likely(ret >= 0 || (value & ((1U << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            /* Zero shift: result unchanged, CA cleared. */
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 32: result replicates the sign bit; CA is set
         * iff the value was negative. */
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
471

    
472
#if defined(TARGET_PPC64)
473
/* srad: 64-bit shift right algebraic.  XER[CA] is set iff the result is
 * negative and one or more 1-bits were shifted out.
 */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* BUG FIX: the shifted-out mask must be built in 64 bits.
             * "1 << shift" shifts an int, which is undefined for
             * shift >= 32 and computed CA incorrectly for large shifts. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            /* Zero shift: result unchanged, CA cleared. */
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 64: result replicates the sign bit; CA is set
         * iff the value was negative. */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
500
#endif
501

    
502
#if defined(TARGET_PPC64)
503
/* popcntb: parallel bit count stopped at byte granularity — each byte of
 * the result holds the population count of the corresponding input byte.
 */
target_ulong helper_popcntb (target_ulong val)
{
    val -= (val >> 1) & 0x5555555555555555ULL;
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
    return val;
}

/* popcntw: per-word bit count — each 32-bit half of the result holds the
 * population count of the corresponding input word.
 */
target_ulong helper_popcntw (target_ulong val)
{
    /* Per-byte counts first... */
    val -= (val >> 1) & 0x5555555555555555ULL;
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
    /* ...then widen the partial sums up to 32-bit granularity. */
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    return val;
}

/* popcntd: population count of the whole 64-bit value. */
target_ulong helper_popcntd (target_ulong val)
{
    /* Per-byte counts, then sum all eight bytes with a multiply. */
    val -= (val >> 1) & 0x5555555555555555ULL;
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
    return (val * 0x0101010101010101ULL) >> 56;
}
545
#else
546
/* popcntb (32-bit target): each byte of the result holds the population
 * count of the corresponding input byte.
 */
target_ulong helper_popcntb (target_ulong val)
{
    val -= (val >> 1) & 0x55555555;
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val + (val >> 4)) & 0x0f0f0f0f;
    return val;
}

/* popcntw (32-bit target): population count of the whole word. */
target_ulong helper_popcntw (target_ulong val)
{
    val -= (val >> 1) & 0x55555555;
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val + (val >> 4)) & 0x0f0f0f0f;
    val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
    return val;
}
563
#endif
564

    
565
/*****************************************************************************/
566
/* Floating point operations helpers */
567
/* Widen a raw 32-bit float image to a 64-bit double image using the
 * softfloat status in env->fp_status.
 */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU in;
    CPU_DoubleU out;

    in.l = arg;
    out.d = float32_to_float64(in.f, &env->fp_status);
    return out.ll;
}

/* Narrow a raw 64-bit double image to a 32-bit float image using the
 * softfloat status in env->fp_status.
 */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_DoubleU in;
    CPU_FloatU out;

    in.ll = arg;
    out.f = float64_to_float32(in.d, &env->fp_status);
    return out.l;
}
584

    
585
/* True iff the biased exponent field of d is all zeroes, i.e. d is a
 * denormal (or a zero — callers exclude zero beforehand).
 */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
593

    
594
/* Classify the double 'arg' into the 5-bit FPRF encoding.  When set_fprf
 * is non-zero, FPSCR[FPRF] is updated; the low 4 bits (the FPCC part,
 * used for Rc=1 condition-register updates) are returned.
 */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            /* Fold the sign into the class code. */
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
645

    
646
/* Floating-point invalid operations exception */
647
/* Floating-point invalid operations exception.
 * Records the invalid-operation condition 'op' (a POWERPC_EXCP_FP_VX*
 * code) in the FPSCR.  For arithmetic cases, returns the default quiet
 * NaN image when the exception is disabled (VE=0), otherwise 0.  When the
 * exception is enabled and MSR[FE0|FE1] permit, this raises a program
 * interrupt and does not return.
 */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred (raised later by float_check_status) */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
727

    
728
/* Record a zero-divide exception in the FPSCR; raise a program interrupt
 * immediately when ZE is set and MSR[FE0|FE1] permit.
 */
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

/* Record an overflow exception.  When enabled (OE set), only queues the
 * program interrupt — it is raised later so the target FPR is updated
 * first; when disabled, the inexact bits are set instead.
 */
static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

/* Record an underflow exception; when enabled, the program interrupt is
 * queued (deferred) rather than raised immediately.
 */
static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/* Record an inexact-result exception; when enabled, the program interrupt
 * is queued (deferred) rather than raised immediately.
 */
static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
790

    
791
static inline void fpscr_set_rounding_mode(void)
792
{
793
    int rnd_type;
794

    
795
    /* Set rounding mode */
796
    switch (fpscr_rn) {
797
    case 0:
798
        /* Best approximation (round to nearest) */
799
        rnd_type = float_round_nearest_even;
800
        break;
801
    case 1:
802
        /* Smaller magnitude (round toward zero) */
803
        rnd_type = float_round_to_zero;
804
        break;
805
    case 2:
806
        /* Round toward +infinite */
807
        rnd_type = float_round_up;
808
        break;
809
    default:
810
    case 3:
811
        /* Round toward -infinite */
812
        rnd_type = float_round_down;
813
        break;
814
    }
815
    set_float_rounding_mode(rnd_type, &env->fp_status);
816
}
817

    
818
/* Clear FPSCR bit 'bit'.  If a rounding-mode bit transitions from set to
 * clear, the softfloat rounding mode is resynchronized.
 */
void helper_fpscr_clrbit (uint32_t bit)
{
    int was_set = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (!was_set) {
        return;
    }
    switch (bit) {
    case FPSCR_RN1:
    case FPSCR_RN:
        fpscr_set_rounding_mode();
        break;
    default:
        break;
    }
}
835

    
836
/* Set FPSCR bit 'bit'.  On a 0->1 transition, maintain the FX/VX/FEX
 * summary bits and, when the corresponding enable bit permits, queue a
 * program interrupt (exception_index/error_code are set; the caller's
 * check raises it so the target register is updated first).
 */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* NOTE(review): no break here — when fpscr_ve is clear this
             * falls through into the FPSCR_OX case; confirm intentional. */
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any individual invalid-operation bit also sets the VX summary. */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            /* Enabling VE with a pending VX condition raises it now,
             * with an error code reflecting every latched VX* bit. */
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            /* Rounding-mode field changed: resync softfloat state. */
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
950

    
951
/* mtfsf: write the nibbles of FPSCR selected by 'mask' from the low 32
 * bits of 'arg', then recompute the VX and FEX summary bits and resync
 * the rounding mode.  Bits 0x60000000 (the summaries recomputed below)
 * are never taken from the source value.
 */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    uint32_t old_fpscr = env->fpscr;
    uint32_t val = (uint32_t)arg;
    int i;

    /* Preserve the summary bits; they are derived, not written. */
    val = (val & ~0x60000000) | (old_fpscr & 0x60000000);
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            uint32_t nibble_mask = 0xF << (4 * i);
            env->fpscr = (env->fpscr & ~nibble_mask) | (val & nibble_mask);
        }
    }
    /* Recompute the invalid-operation summary VX. */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    /* Recompute FEX; an enabled pending exception becomes a queued
     * program interrupt. */
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
984

    
985
/* Called after an FP operation: either deliver a previously deferred FP
 * program interrupt, or (with softfloat) translate the accumulated
 * softfloat flags into the corresponding FPSCR exception updates.
 */
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        /* Only the highest-priority pending flag is reported. */
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
1014

    
1015
#ifdef CONFIG_SOFTFLOAT
1016
void helper_reset_fpstatus (void)
1017
{
1018
    set_float_exception_flags(0, &env->fp_status);
1019
}
1020
#endif
1021

    
1022
/* fadd - fadd. */
/* Double-precision add; raises VXISI on inf - inf, VXSNAN on sNaN input. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fsub - fsub. */
/* Double-precision subtract; inf - inf of like sign is VXISI. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fmul - fmul. */
/* Double-precision multiply; 0 * inf (either order) is VXIMZ. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fdiv - fdiv. */
/* Double-precision divide; inf/inf is VXIDI, 0/0 is VXZDZ. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1119

    
1120
/* fabs */
1121
uint64_t helper_fabs (uint64_t arg)
1122
{
1123
    CPU_DoubleU farg;
1124

    
1125
    farg.ll = arg;
1126
    farg.d = float64_abs(farg.d);
1127
    return farg.ll;
1128
}
1129

    
1130
/* fnabs */
1131
uint64_t helper_fnabs (uint64_t arg)
1132
{
1133
    CPU_DoubleU farg;
1134

    
1135
    farg.ll = arg;
1136
    farg.d = float64_abs(farg.d);
1137
    farg.d = float64_chs(farg.d);
1138
    return farg.ll;
1139
}
1140

    
1141
/* fneg */
1142
uint64_t helper_fneg (uint64_t arg)
1143
{
1144
    CPU_DoubleU farg;
1145

    
1146
    farg.ll = arg;
1147
    farg.d = float64_chs(farg.d);
1148
    return farg.ll;
1149
}
1150

    
1151
/* fctiw - fctiw. */
/* Convert double to signed 32-bit integer with the current rounding mode;
 * NaN/infinity inputs raise VXCVI (plus VXSNAN for sNaN). */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
/* Same as fctiw but always rounds toward zero. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1194

    
1195
#if defined(TARGET_PPC64)
1196
/* fcfid - fcfid. */
1197
uint64_t helper_fcfid (uint64_t arg)
1198
{
1199
    CPU_DoubleU farg;
1200
    farg.d = int64_to_float64(arg, &env->fp_status);
1201
    return farg.ll;
1202
}
1203

    
1204
/* fctid - fctid. */
1205
uint64_t helper_fctid (uint64_t arg)
1206
{
1207
    CPU_DoubleU farg;
1208
    farg.ll = arg;
1209

    
1210
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1211
        /* sNaN conversion */
1212
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1213
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1214
        /* qNan / infinity conversion */
1215
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1216
    } else {
1217
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
1218
    }
1219
    return farg.ll;
1220
}
1221

    
1222
/* fctidz - fctidz. */
1223
uint64_t helper_fctidz (uint64_t arg)
1224
{
1225
    CPU_DoubleU farg;
1226
    farg.ll = arg;
1227

    
1228
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1229
        /* sNaN conversion */
1230
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1231
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1232
        /* qNan / infinity conversion */
1233
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1234
    } else {
1235
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1236
    }
1237
    return farg.ll;
1238
}
1239

    
1240
#endif
1241

    
1242
/* Common body of the frin/friz/frip/frim round-to-integer instructions:
 * round to an integral double using 'rounding_mode', then restore the
 * FPSCR-selected rounding mode. NaN/infinity inputs raise VXCVI. */
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

/* frin: round to nearest, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

/* friz: round toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

/* frip: round toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

/* frim: round toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1281

    
1282
/* fmadd - fmadd. */
/* Fused multiply-add (arg1 * arg2) + arg3.  With FLOAT128 the product is
 * kept at 128-bit precision as the architecture specifies; otherwise a
 * plain double expression is used (acceptable on x86 hosts). */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }

    return farg1.ll;
}

/* fmsub - fmsub. */
/* Fused multiply-subtract (arg1 * arg2) - arg3; same structure as fmadd
 * but the inf-inf check compares like signs and the final op subtracts. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
    return farg1.ll;
}
1370

    
1371
/* fnmadd - fnmadd. */
/* Negated fused multiply-add: -((arg1 * arg2) + arg3); the final sign
 * flip is skipped for NaN results (NaN sign is left untouched). */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
/* Negated fused multiply-subtract: -((arg1 * arg2) - arg3); NaN results
 * are not sign-flipped. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1464

    
1465
/* frsp - frsp. */
/* Round a double to single precision (via float32 and back). */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
/* Double-precision square root; negative nonzero inputs raise VXSQRT. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1500

    
1501
/* fre - fre. */
1502
uint64_t helper_fre (uint64_t arg)
1503
{
1504
    CPU_DoubleU farg;
1505
    farg.ll = arg;
1506

    
1507
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1508
        /* sNaN reciprocal */
1509
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1510
    }
1511
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1512
    return farg.d;
1513
}
1514

    
1515
/* fres - fres. */
/* Single-precision reciprocal estimate: full-precision 1.0/x divide,
 * then rounded through float32 to single precision. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte  - frsqrte. */
/* Reciprocal square-root estimate: sqrt then divide, rounded through
 * float32.  Negative nonzero inputs raise VXSQRT. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1555

    
1556
/* fsel - fsel. */
1557
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1558
{
1559
    CPU_DoubleU farg1;
1560

    
1561
    farg1.ll = arg1;
1562

    
1563
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
1564
        return arg2;
1565
    } else {
1566
        return arg3;
1567
    }
1568
}
1569

    
1570
/* Unordered FP compare: set CR field crfD and FPSCR[FPRF] to one of
 * 0x08 (<), 0x04 (>), 0x02 (=), 0x01 (unordered); sNaN operands
 * additionally raise VXSNAN. */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

/* Ordered FP compare: same result encoding as fcmpu, but any unordered
 * outcome raises VXVC (with VXSNAN as well for sNaN operands). */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1632

    
1633
#if !defined (CONFIG_USER_ONLY)
/* mtmsr: store 'val' into the MSR; a nonzero return from hreg_store_msr
 * is an exception number that must be raised after forcing a TB exit. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

/* Common return-from-interrupt tail: load NIP and (masked) MSR, forcing
 * 32-bit truncation when MSR[SF] is clear; 'keep_msrh' preserves the
 * upper MSR half in that case. */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1671

    
1672
/* rfi: return from interrupt using SRR0/SRR1 (keeps the high MSR half). */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}

#if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}

/* hrfid: return from hypervisor interrupt using HSRR0/HSRR1. */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
#endif
#endif
1692

    
1693
/* tw: trap word — raise a TRAP program exception if any enabled 32-bit
 * comparison (flags bits: 0x10 lt, 0x08 gt, 0x04 eq, 0x02 ltu, 0x01 gtu)
 * holds between arg1 and arg2. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
/* td: trap doubleword — 64-bit variant of tw with the same flag encoding. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
1715

    
1716
/*****************************************************************************/
1717
/* PowerPC 601 specific instructions (POWER bridge) */
1718

    
1719
target_ulong helper_clcs (uint32_t arg)
1720
{
1721
    switch (arg) {
1722
    case 0x0CUL:
1723
        /* Instruction cache line size */
1724
        return env->icache_line_size;
1725
        break;
1726
    case 0x0DUL:
1727
        /* Data cache line size */
1728
        return env->dcache_line_size;
1729
        break;
1730
    case 0x0EUL:
1731
        /* Minimum cache line size */
1732
        return (env->icache_line_size < env->dcache_line_size) ?
1733
                env->icache_line_size : env->dcache_line_size;
1734
        break;
1735
    case 0x0FUL:
1736
        /* Maximum cache line size */
1737
        return (env->icache_line_size > env->dcache_line_size) ?
1738
                env->icache_line_size : env->dcache_line_size;
1739
        break;
1740
    default:
1741
        /* Undefined */
1742
        return 0;
1743
        break;
1744
    }
1745
}
1746

    
1747
/* POWER div: divide the 64-bit value (arg1:MQ) by arg2, quotient returned,
 * remainder left in MQ; overflow/divide-by-zero yields INT32_MIN with MQ=0. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): tmp is unsigned 64-bit while arg2 is used signed —
         * the % and / below mix signednesses; verify against POWER
         * divs/div semantics for negative operands. */
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}

/* POWER divo: as helper_div, but also sets/clears XER[OV] (and sets SO)
 * when the quotient does not fit in 32 bits. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1781

    
1782
/* POWER divs: 32-bit signed divide, remainder in MQ; overflow or
 * divide-by-zero yields INT32_MIN with MQ=0. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

/* POWER divso: as helper_divs, additionally updating XER[OV]/XER[SO]
 * on overflow or divide-by-zero. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
1807

    
1808
#if !defined (CONFIG_USER_ONLY)
/* rac (601): translate 'addr' to a real address through the MMU,
 * temporarily disabling BAT translation; returns 0 on translation failure. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

/* rfsvc (601): return from supervisor call — NIP from LR, MSR from CTR. */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
1832

    
1833
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremly decomposed:
 *                      -arg / 256
 * return 256 * log10(10           + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
/* mfrom (602): look up the precomputed ROM table for arg < 602,
 * otherwise return 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
1852

    
1853
/*****************************************************************************/
1854
/* Embedded PowerPC specific helpers */
1855

    
1856
/* XXX: to be improved to check access rights when in user-mode */
1857
target_ulong helper_load_dcr (target_ulong dcrn)
1858
{
1859
    uint32_t val = 0;
1860

    
1861
    if (unlikely(env->dcr_env == NULL)) {
1862
        qemu_log("No DCR environment\n");
1863
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1864
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1865
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
1866
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
1867
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1868
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1869
    }
1870
    return val;
1871
}
1872

    
1873
/* Write 'val' to Device Control Register 'dcrn' via the DCR bus model.
 * Mirrors helper_load_dcr: program exception on missing DCR environment
 * or rejected write. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1885

    
1886
#if !defined(CONFIG_USER_ONLY)
1887
/* PPC 40x return from critical interrupt: resume at SRR2 and restore
 * the MSR bits selected by ~0xFFFF0000 from SRR3. */
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}
1892

    
1893
void helper_rfci (void)
1894
{
1895
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1896
           ~((target_ulong)0x3FFF0000), 0);
1897
}
1898

    
1899
void helper_rfdi (void)
1900
{
1901
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1902
           ~((target_ulong)0x3FFF0000), 0);
1903
}
1904

    
1905
void helper_rfmci (void)
1906
{
1907
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1908
           ~((target_ulong)0x3FFF0000), 0);
1909
}
1910
#endif
1911

    
1912
/* 440 specific */
1913
/* 440 dlmzb: scan the 8-byte string formed by the low 32 bits of 'high'
 * followed by the low 32 bits of 'low' for the leftmost zero byte.
 * Returns the 1-based position of that byte (9 when no zero byte) and
 * writes it into the low 7 bits of XER.  When update_Rc is set, CR0
 * becomes 0x4 (zero in 'high'), 0x8 (zero in 'low') or 0x2 (none),
 * always ORed with XER[SO]. */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    const target_ulong words[2] = { high, low };
    int cr = 0x2;               /* default: no zero byte found */
    int i = 1;                  /* 1-based byte position */
    int w, shift;
    int found = 0;

    for (w = 0; w < 2 && !found; w++) {
        for (shift = 24; shift >= 0 && !found; shift -= 8) {
            if (((words[w] >> shift) & 0xFF) == 0) {
                found = 1;
                cr = (w == 0) ? 0x4 : 0x8;
            } else {
                i++;
            }
        }
    }
    /* Only the low 7 bits of XER are replaced; SO is untouched. */
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] = cr | xer_so;
    }
    return i;
}
1947

    
1948
/*****************************************************************************/
1949
/* Altivec extension helpers */
1950
/* Element-index helpers: HI_IDX/LO_IDX select the array slot holding the
 * architecturally "high"/"low" half of a pair, depending on host order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over vector elements in guest (big-endian) order regardless of
 * host byte order; expects a ppc_avr_t pointer named 'r' in scope. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                                \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* Chained NaN screens for 1-3 float operands; the statement following the
 * macro runs only when none of the operands is a NaN. */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1982

    
1983
/* Saturating arithmetic helpers.  */
/* SATCVT generates cvt<from><to>(): clamp a wider signed value into the
 * destination range, setting *sat when clamping occurred.  SATCVTU is the
 * unsigned variant (no lower-bound test needed). */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* signed -> narrower signed */
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

/* unsigned -> narrower unsigned */
SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
/* signed -> unsigned (negative inputs clamp to 0) */
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
2023

    
2024
/* LVE generates the lvebx/lvehx/lvewx helpers: load one element from
 * memory into the vector slot selected by the low address bits,
 * byte-swapping when MSR[LE] is set. */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)  /* identity "swap" for the byte-sized variant */
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
2043

    
2044
/* lvsl: build the permute control vector sh, sh+1, ..., sh+15 (mod 32),
 * stored in guest element order regardless of host byte order. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int base = sh & 0xf;
    int i;

    for (i = 0; i < 16; i++) {
#if defined(HOST_WORDS_BIGENDIAN)
        r->u8[i] = base + i;
#else
        /* Guest element order runs backwards through host memory. */
        r->u8[i] = base + (15 - i);
#endif
    }
}
2052

    
2053
/* lvsr: like lvsl but the sequence starts at 0x10 - (sh & 0xf). */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int base = 0x10 - (sh & 0xf);
    int i;

    for (i = 0; i < 16; i++) {
#if defined(HOST_WORDS_BIGENDIAN)
        r->u8[i] = base + i;
#else
        /* Guest element order runs backwards through host memory. */
        r->u8[i] = base + (15 - i);
#endif
    }
}
2061

    
2062
#define STVE(name, access, swap, element)                       \
2063
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2064
    {                                                           \
2065
        size_t n_elems = ARRAY_SIZE(r->element);                \
2066
        int adjust = HI_IDX*(n_elems-1);                        \
2067
        int sh = sizeof(r->element[0]) >> 1;                    \
2068
        int index = (addr & 0xf) >> sh;                         \
2069
        if(msr_le) {                                            \
2070
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2071
        } else {                                                        \
2072
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2073
        }                                                               \
2074
    }
2075
#define I(x) (x)
2076
STVE(stvebx, stb, I, u8)
2077
STVE(stvehx, stw, bswap16, u16)
2078
STVE(stvewx, stl, bswap32, u32)
2079
#undef I
2080
#undef LVE
2081

    
2082
/* mtvscr: copy the low word of the source vector (guest word 3) into
 * VSCR and propagate the non-Java (NJ) bit into the softfloat
 * flush-to-zero setting for vector arithmetic. */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2091

    
2092
/* vaddcuw: per 32-bit element, write the carry-out (0 or 1) of the
 * unsigned addition a + b. */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        /* a + b carries out iff the wrapped sum is smaller than a. */
        uint32_t sum = a->u32[i] + b->u32[i];
        r->u32[i] = sum < a->u32[i];
    }
}
2099

    
2100
/* VARITH generates modulo (non-saturating) element-wise add/sub helpers
 * for 8/16/32-bit unsigned elements (vaddubm, vsubuwm, ...). */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2116

    
2117
/* VARITHFP generates element-wise float add/sub (vaddfp/vsubfp); NaN
 * operands short-circuit to the quieted NaN via HANDLE_NAN2. */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2130

    
2131
/* Single-element saturating op: compute in the wider 'type' and clamp
 * back with the matching cvt* helper, recording saturation in 'sat'. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

/* VARITHSAT generates the saturating add/sub helpers (vaddsbs, vsubuws,
 * ...); any clamped element sets VSCR[SAT]. */
#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2169

    
2170
/* VAVG generates rounded-average helpers (vavgsb..vavguw): compute
 * (a + b + 1) >> 1 in a wider type so the sum cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2188

    
2189
/* VCF generates vcfux/vcfsx: convert each (un)signed 32-bit integer to
 * float and scale by 2^-uim. */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
2201

    
2202
/* VCMP generates the integer compare helpers (vcmpequb..vcmpgtsw):
 * each element becomes all-ones on match, zero otherwise.  The "_dot"
 * (record) forms also set CR6: bit 3 = all elements matched,
 * bit 1 = none matched. */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2237

    
2238
/* VCMPFP generates the float compare helpers (vcmpeqfp/vcmpgefp/
 * vcmpgtfp): unordered comparisons always yield 0; otherwise the
 * relation is tested against 'order' ("ge" is written as
 * rel != float_relation_less).  Record forms set CR6 like VCMP. */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
2271

    
2272
/* Core of vcmpbfp(.): bounds-compare each element of a against [-b, b].
 * Result bit 31 = a > b (out of upper bound), bit 30 = a < -b (out of
 * lower bound); NaN operands produce 0xc0000000.  With 'record', CR6
 * bit 1 is set when every element was within bounds. */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}
2295

    
2296
/* vcmpbfp: bounds compare without updating CR6. */
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}
2300

    
2301
/* vcmpbfp.: bounds compare that also records the all-in-bounds flag in CR6. */
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
2305

    
2306
/* VCT generates vctuxs/vctsxs: scale each float by 2^uim, truncate to a
 * 64-bit integer (round-to-zero, via float64 so the scaling is exact),
 * then saturate into the 32-bit destination.  NaN inputs become 0;
 * saturation sets VSCR[SAT]. */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
2331

    
2332
/* vmaddfp: fused-style multiply-add (a * c) + b per element, emulated by
 * widening to float64 so only one rounding occurs at the final
 * conversion back to float32.  NaN operands are quieted via HANDLE_NAN3. */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2349

    
2350
/* vmhaddshs: per 16-bit element, (a * b) >> 15 + c, saturated to int16;
 * saturation sets VSCR[SAT]. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2365

    
2366
/* vmhraddshs: like vmhaddshs but the product is rounded by adding
 * 0x4000 before the >> 15. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2381

    
2382
/* VMINMAX generates the integer min/max helpers (vminsb..vmaxuw):
 * keep b when "a compare b" holds, else keep a — so '>' yields min
 * and '<' yields max. */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2405

    
2406
/* VMINMAXFP generates vminfp/vmaxfp: choose rT when a < b (quiet
 * compare), rF otherwise; NaN operands are quieted via HANDLE_NAN2. */
#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
2423

    
2424
/* vmladduhm: per 16-bit element, multiply-low then add c, modulo 2^16. */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        /* Products are formed in int precision; only the low 16 bits
         * of (a*b + c) survive the final narrowing cast. */
        r->s16[i] = (int16_t)(a->s16[i] * b->s16[i] + c->s16[i]);
    }
}
2432

    
2433
/* VMRG generates the merge-high/merge-low helpers (vmrghb..vmrglw).
 * 'highp' selects which interleaving loop runs; the result is built in
 * a temporary so r may alias a or b.
 * NOTE(review): MRGHI/MRGLO are swapped per host endianness and the
 * mrgl*/mrgh* pairing below looks inverted at first glance — the two
 * inversions appear to cancel so that element order comes out correct;
 * confirm against the AltiVec merge definitions before touching this. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2467

    
2468
/* vmsummbm: signed-byte * unsigned-byte products, four per 32-bit lane,
 * summed into c (modulo arithmetic, no saturation). */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2481

    
2482
/* vmsumshm: signed 16-bit products, two per 32-bit lane, summed into c
 * (modulo arithmetic, no saturation). */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}
2495

    
2496
/* vmsumshs: like vmsumshm but the per-lane sum is formed in 64 bits and
 * saturated to int32; saturation sets VSCR[SAT]. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2515

    
2516
/* vmsumubm: unsigned 8-bit products, four per 32-bit lane, summed into c
 * (modulo arithmetic, no saturation). */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2529

    
2530
/* vmsumuhm: unsigned 16-bit products, two per 32-bit lane, summed into c
 * (modulo arithmetic, no saturation). */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}
2543

    
2544
/* vmsumuhs: like vmsumuhm but the per-lane sum is formed in 64 bits and
 * saturated to uint32; saturation sets VSCR[SAT]. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2563

    
2564
/* VMUL generates the widening even/odd multiply helpers (vmulesb..
 * vmulouh): each double-width product lane takes the even- or
 * odd-indexed source elements depending on 'evenp'. */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2585

    
2586
/* vnmsubfp: per element computes -((a * c) - b), i.e. the negated
 * multiply-subtract.  The intermediate is carried in double precision
 * so only one rounding happens, matching the fused hardware behavior.  */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2604

    
2605
/* vperm: byte-wise permute.  Each control byte of c selects one of the
 * 32 bytes of the concatenated a:b pair (bit 4 picks the source vector,
 * the low four bits pick the byte within it).  */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t tmp;
    int i;

    /* Build into a temporary first: r may alias a, b or c. */
    VECTOR_FOR_INORDER_I (i, u8) {
        int sel = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int idx = sel & 0xf;
#else
        int idx = 15 - (sel & 0xf);
#endif
        tmp.u8[i] = (sel & 0x10) ? b->u8[idx] : a->u8[idx];
    }
    *r = tmp;
}
2624

    
2625
/* PKBIG selects which source vector supplies the high half of a pack
 * result, compensating for host byte order (also used by VPK below).  */
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack eight 8/8/8/8 pixels from a:b into eight 1/5/5/5
 * halfwords, keeping the top bits of each color channel.  */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
2650

    
2651
/* Generic vector pack: narrows the 2*N source elements of a:b into the
 * N-element result via cvt (a saturating converter, or the identity
 * macro I for the modulo variants).  a0/a1 ordering via PKBIG keeps the
 * PPC element order regardless of host endianness.  The temporary
 * 'result' makes the helper safe when r aliases a or b.  */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
/* Identity "conversion" for the modulo (non-saturating) pack variants. */
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2680

    
2681
/* vrefp: per-element reciprocal estimate.  Implemented as an exact
 * 1.0 / x division rather than a hardware-style estimate.  */
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int elt;

    for (elt = 0; elt < ARRAY_SIZE(r->f); elt++) {
        HANDLE_NAN1(r->f[elt], b->f[elt]) {
            r->f[elt] = float32_div(float32_one, b->f[elt], &env->vec_status);
        }
    }
}
2690

    
2691
/* vrfi{n,m,p,z}: round each element to an integral float value using the
 * given rounding mode.  A local copy of vec_status is used so the mode
 * change does not leak into the ambient vector FP state.  */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
2708

    
2709
#define VROTATE(suffix, element)                                        \
2710
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2711
    {                                                                   \
2712
        int i;                                                          \
2713
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2714
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2715
            unsigned int shift = b->element[i] & mask;                  \
2716
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2717
        }                                                               \
2718
    }
2719
VROTATE(b, u8)
2720
VROTATE(h, u16)
2721
VROTATE(w, u32)
2722
#undef VROTATE
2723

    
2724
/* vrsqrtefp: per-element reciprocal square-root estimate, computed
 * exactly as 1.0 / sqrt(x).  */
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int elt;

    for (elt = 0; elt < ARRAY_SIZE(r->f); elt++) {
        HANDLE_NAN1(r->f[elt], b->f[elt]) {
            float32 root = float32_sqrt(b->f[elt], &env->vec_status);
            r->f[elt] = float32_div(float32_one, root, &env->vec_status);
        }
    }
}
2734

    
2735
/* vsel: bit-wise select — where a mask bit of c is set take the bit
 * from b, otherwise from a.  */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < 2; i++) {
        r->u64[i] = (b->u64[i] & c->u64[i]) | (a->u64[i] & ~c->u64[i]);
    }
}
2740

    
2741
/* vexptefp: per-element 2^x estimate (computed via float32_exp2). */
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int elt;

    for (elt = 0; elt < ARRAY_SIZE(r->f); elt++) {
        HANDLE_NAN1(r->f[elt], b->f[elt]) {
            r->f[elt] = float32_exp2(b->f[elt], &env->vec_status);
        }
    }
}
2750

    
2751
/* vlogefp: per-element log2(x) estimate (computed via float32_log2). */
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int elt;

    for (elt = 0; elt < ARRAY_SIZE(r->f); elt++) {
        HANDLE_NAN1(r->f[elt], b->f[elt]) {
            r->f[elt] = float32_log2(b->f[elt], &env->vec_status);
        }
    }
}
2760

    
2761
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
/* vsl/vsr: shift the whole 128-bit register by 0-7 bits.  The shift
 * count is taken from the low 3 bits of every byte of b; if the bytes
 * disagree, r is left unmodified (hardware-observed behavior).  The
 * shift == 0 case is special-cased because the 64 - shift carry shift
 * would otherwise be a full-width (undefined) shift.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
2799

    
2800
/* vsl{b,h,w}: per-element logical shift left; the count comes from the
 * matching element of b, masked to element-width - 1 so the shift is
 * always in range.  */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2814

    
2815
/* vsldoi: shift the 32-byte concatenation a:b left by 'shift' bytes and
 * take the high 16 bytes.  The two loops are mirror images so that the
 * byte order in host memory matches the PPC element order on both big-
 * and little-endian hosts.  The temporary makes r-aliases-a/b safe.  */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
2842

    
2843
/* vslo: shift the whole register left by whole octets; the count is
 * bits 121:124 of b (bytes 3-6 of its last byte in PPC order).  The
 * vacated bytes are zero-filled.  memmove is used because r may alias a.  */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2855

    
2856
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
/* Mirror the index on little-endian hosts to keep PPC element order. */
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* vsplt{b,h,w}: replicate the element of b selected by the (masked)
 * immediate into every element of r.  */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2878

    
2879
/* vspltis{b,h,w}: splat a sign-extended 5-bit immediate into every
 * element.  The (int8_t)(splat << 3) >> 3 dance sign-extends the 5-bit
 * immediate through bit 4 of the byte.  */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2892

    
2893
/* vsr{a}{b,h,w}: per-element shift right; arithmetic for the signed
 * element types (sra*), logical for the unsigned ones.  The count is
 * masked to element-width - 1 so the shift is always in range.  */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2910

    
2911
/* vsro: shift the whole register right by whole octets (mirror image of
 * vslo); vacated bytes are zero-filled, memmove tolerates r == a.  */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2923

    
2924
/* vsubcuw: per-word borrow indicator — 1 when a - b does not borrow
 * (a >= b), otherwise 0.  */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = (a->u32[i] < b->u32[i]) ? 0 : 1;
    }
}
2931

    
2932
/* vsumsws: sum all four signed words of a plus the last word of b in a
 * 64-bit accumulator, saturate to 32 bits into the last element of r
 * (PPC order), and zero the other elements.  Sets VSCR[SAT] on
 * saturation.  'upper' is the host-order index of the PPC-last word.  */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2956

    
2957
/* vsum2sws: for each doubleword, sum its two signed words of a plus the
 * odd word of b, saturate to 32 bits into the odd word of r and zero
 * the even word.  The inner loop bound ARRAY_SIZE(r->u64) == 2 is the
 * number of words per doubleword, which is what is intended here.  */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2982

    
2983
/* vsum4sbs: for each word, add its four signed bytes of a to the
 * matching word of b in a 64-bit accumulator, then saturate to the
 * signed 32-bit range.  VSCR[SAT] is set if anything saturated.  */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t acc = (int64_t)b->s32[i];
        acc += a->s8[4*i] + a->s8[4*i+1] + a->s8[4*i+2] + a->s8[4*i+3];
        r->s32[i] = cvtsdsw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
3000

    
3001
/* vsum4shs: for each word, add its two signed halfwords of a to the
 * matching word of b in a 64-bit accumulator, then saturate to the
 * signed 32-bit range.  VSCR[SAT] is set if anything saturated.  */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t acc = (int64_t)b->s32[i] + a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
3016

    
3017
/* vsum4ubs: for each word, add its four unsigned bytes of a to the
 * matching word of b in a 64-bit accumulator, then saturate to the
 * unsigned 32-bit range.  VSCR[SAT] is set if anything saturated.  */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t acc = (uint64_t)b->u32[i];
        acc += a->u8[4*i] + a->u8[4*i+1] + a->u8[4*i+2] + a->u8[4*i+3];
        r->u32[i] = cvtuduw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
3034

    
3035
#if defined(HOST_WORDS_BIGENDIAN)
3036
#define UPKHI 1
3037
#define UPKLO 0
3038
#else
3039
#define UPKHI 0
3040
#define UPKLO 1
3041
#endif
3042
#define VUPKPX(suffix, hi)                                      \
3043
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
3044
    {                                                           \
3045
        int i;                                                  \
3046
        ppc_avr_t result;                                       \
3047
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
3048
            uint16_t e = b->u16[hi ? i : i+4];                  \
3049
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
3050
            uint8_t r = (e >> 10) & 0x1f;                       \
3051
            uint8_t g = (e >> 5) & 0x1f;                        \
3052
            uint8_t b = e & 0x1f;                               \
3053
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
3054
        }                                                               \
3055
        *r = result;                                                    \
3056
    }
3057
VUPKPX(lpx, UPKLO)
3058
VUPKPX(hpx, UPKHI)
3059
#undef VUPKPX
3060

    
3061
/* vupk{h,l}{sb,sh}: sign-extend the high (hi != 0) or low half of the
 * packed source elements into the wider destination elements.  The
 * temporary makes the helper safe when r aliases b.  */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO
3084

    
3085
#undef DO_HANDLE_NAN
3086
#undef HANDLE_NAN1
3087
#undef HANDLE_NAN2
3088
#undef HANDLE_NAN3
3089
#undef VECTOR_FOR_INORDER_I
3090
#undef HI_IDX
3091
#undef LO_IDX
3092

    
3093
/*****************************************************************************/
3094
/* SPE extension helpers */
3095
/* Use a table to make this quicker */
/* 4-bit bit-reversal lookup table used by byte_reverse().  Declared
 * const: it is read-only data and belongs in .rodata.  */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};
3100

    
3101
static inline uint8_t byte_reverse(uint8_t val)
3102
{
3103
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3104
}
3105

    
3106
/* Reverse the bit order of a 32-bit word: reverse each byte and swap
 * the byte positions end for end.  */
static inline uint32_t word_reverse(uint32_t val)
{
    uint32_t out = 0;
    int i;

    for (i = 0; i < 4; i++) {
        out |= (uint32_t)byte_reverse(val >> (24 - 8 * i)) << (8 * i);
    }
    return out;
}
3111

    
3112
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
/* brinc: bit-reversed increment for FFT addressing.  The low MASKBITS
 * bits of arg1 are incremented in bit-reversed order within the set
 * bits of arg2: reverse, add 1 (carry then propagates in the reversed
 * domain), reverse back, and keep only the masked bits of the result.  */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
3123

    
3124
/* Count leading sign bits of a 32-bit word: for negative values count
 * leading ones (by complementing first), otherwise leading zeros.  */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000) {
        val = ~val;
    }
    return clz32(val);
}
3131

    
3132
/* Count leading zeros of a 32-bit word. */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
3136

    
3137
/* Single-precision floating-point conversions */
3138
/* efscfsi: convert a signed 32-bit integer to single-precision float,
 * returned as its raw 32-bit image.  */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}
3146

    
3147
/* efscfui: convert an unsigned 32-bit integer to single-precision
 * float, returned as its raw 32-bit image.  */
static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}
3155

    
3156
/* efsctsi: convert single-precision float (raw image) to signed int.
 * SPE returns 0 for NaN inputs instead of IEEE 754 behavior; use
 * float32_is_any_nan so signalling NaNs are caught too, consistent
 * with helper_efdctsi (which uses float64_is_any_nan) — the original
 * only tested for quiet NaNs.  */
static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_any_nan(u.f))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}
3167

    
3168
/* efsctui: convert single-precision float (raw image) to unsigned int.
 * Any NaN (quiet or signalling) yields 0, matching helper_efdctui's use
 * of the *_is_any_nan predicate; the original only caught quiet NaNs.  */
static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_any_nan(u.f))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}
3179

    
3180
/* efsctsiz: convert single-precision float to signed int, rounding
 * toward zero.  Any NaN (quiet or signalling) yields 0; the original
 * quiet-NaN-only test let signalling NaNs reach the conversion.  */
static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_any_nan(u.f))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}
3191

    
3192
/* efsctuiz: convert single-precision float to unsigned int, rounding
 * toward zero.  Any NaN (quiet or signalling) yields 0; the original
 * quiet-NaN-only test let signalling NaNs reach the conversion.  */
static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_any_nan(u.f))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
3203

    
3204
/* efscfsf: convert a signed 32-bit fractional value to single-precision
 * float by dividing the integer conversion by 2^32.  */
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}
3215

    
3216
/* efscfuf: convert an unsigned 32-bit fractional value to
 * single-precision float by dividing the integer conversion by 2^32.  */
static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}
3227

    
3228
/* efsctsf: convert single-precision float to a signed 32-bit fractional
 * value by scaling by 2^32 before the integer conversion.  Any NaN
 * (quiet or signalling) yields 0; the original quiet-NaN-only test let
 * signalling NaNs through, inconsistent with the efdct* helpers.  */
static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_any_nan(u.f))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}
3242

    
3243
/* efsctuf: convert single-precision float to an unsigned 32-bit
 * fractional value by scaling by 2^32 before the integer conversion.
 * Any NaN (quiet or signalling) yields 0; the original quiet-NaN-only
 * test let signalling NaNs through.  */
static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_any_nan(u.f))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
3257

    
3258
/* Generate the public single-element SPE conversion helpers as thin
 * wrappers around the static inline e* implementations above.  */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3283

    
3284
/* Generate the two-element (vector) SPE conversion helpers: apply the
 * scalar conversion independently to the high and low 32-bit halves.  */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3310

    
3311
/* Single-precision floating-point arithmetic */
3312
/* efsadd: single-precision add on raw 32-bit float images. */
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
3320

    
3321
/* efssub: single-precision subtract (op1 - op2) on raw float images. */
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
3329

    
3330
/* efsmul: single-precision multiply on raw 32-bit float images. */
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
3338

    
3339
/* efsdiv: single-precision divide (op1 / op2) on raw float images. */
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
3347

    
3348
/* Generate the public single-element SPE arithmetic helpers as thin
 * wrappers around the static inline e* implementations above.  */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
3361

    
3362
/* Generate the two-element (vector) SPE arithmetic helpers: apply the
 * scalar operation independently to the high and low 32-bit halves.  */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3376

    
3377
/* Single-precision floating-point comparisons */
3378
/* efststlt: single-precision "test" compare; returns 4 (the CR bit
 * value) when op1 < op2, else 0.  */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}
3385

    
3386
/* efststgt: returns 4 when op1 > op2, expressed as !(op1 <= op2).
 * NOTE(review): with a NaN operand float32_le is false, so this
 * reports "greater" — presumably acceptable for the tst variant,
 * which ignores IEEE NaN semantics; confirm against the SPE spec.  */
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}
3393

    
3394
/* efststeq: returns 4 (the CR bit value) when op1 == op2, else 0. */
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}
3401

    
3402
/* efscmplt: IEEE-style compare; currently an alias for the test
 * variant (special values not yet distinguished).  */
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststlt(op1, op2);
}
3407

    
3408
/* efscmpgt: IEEE-style compare; currently an alias for the test
 * variant (special values not yet distinguished).  */
static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststgt(op1, op2);
}
3413

    
3414
/* efscmpeq: IEEE-style compare; currently an alias for the test
 * variant (special values not yet distinguished).  */
static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststeq(op1, op2);
}
3419

    
3420
/* Generate the public scalar SPE compare helpers.  The e* primitives
 * return 0 or 4; shifting left by 2 places the result in the CR field
 * position expected by the translator.  */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3437

    
3438
/* Merge the two 0/1 per-half compare results into the 4-bit CR field:
 * bit3 = high result, bit2 = low result, bit1 = either, bit0 = both.  */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t either = t0 | t1;
    uint32_t both = t0 & t1;

    return (t0 << 3) | (t1 << 2) | (either << 1) | both;
}
3442

    
3443
/* Emit the vector (evf*) helper for an SPE compare: run the 32-bit
 * compare on the high and low halves of each 64-bit operand and merge
 * the two results with evcmp_merge(). */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3460

    
3461
/* Double-precision floating-point conversion */

/* efdcfsi: signed 32-bit integer -> double. */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    /* val carries a signed guest word; int32_to_float64 reinterprets it */
    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfsid: signed 64-bit integer -> double. */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfui: unsigned 32-bit integer -> double. */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfuid: unsigned 64-bit integer -> double. */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}
3497

    
3498
/* efdctsi: double -> signed 32-bit integer, current rounding mode. */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does: SPE converts to 0 */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctui: double -> unsigned 32-bit integer, current rounding mode. */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

/* efdctsiz: double -> signed 32-bit integer, round toward zero. */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

/* efdctsidz: double -> signed 64-bit integer, round toward zero. */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

/* efdctuiz: double -> unsigned 32-bit integer, round toward zero. */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

/* efdctuidz: double -> unsigned 64-bit integer, round toward zero. */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
3575

    
3576
/* efdcfsf: signed 32-bit fractional -> double (result = val / 2^32). */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

/* efdcfuf: unsigned 32-bit fractional -> double (result = val / 2^32). */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

/* efdctsf: double -> signed 32-bit fractional (scale by 2^32, then
 * convert); NaN input yields 0. */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctuf: double -> unsigned 32-bit fractional; NaN input yields 0. */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
3631

    
3632
/* efscfd: convert double to single precision. */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

/* efdcfs: convert single to double precision. */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
3653

    
3654
/* Double precision fixed-point arithmetic */
3655
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3656
{
3657
    CPU_DoubleU u1, u2;
3658
    u1.ll = op1;
3659
    u2.ll = op2;
3660
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3661
    return u1.ll;
3662
}
3663

    
3664
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3665
{
3666
    CPU_DoubleU u1, u2;
3667
    u1.ll = op1;
3668
    u2.ll = op2;
3669
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3670
    return u1.ll;
3671
}
3672

    
3673
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3674
{
3675
    CPU_DoubleU u1, u2;
3676
    u1.ll = op1;
3677
    u2.ll = op2;
3678
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3679
    return u1.ll;
3680
}
3681

    
3682
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3683
{
3684
    CPU_DoubleU u1, u2;
3685
    u1.ll = op1;
3686
    u2.ll = op2;
3687
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3688
    return u1.ll;
3689
}
3690

    
3691
/* Double precision floating point helpers */
3692
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3693
{
3694
    CPU_DoubleU u1, u2;
3695
    u1.ll = op1;
3696
    u2.ll = op2;
3697
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3698
}
3699

    
3700
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3701
{
3702
    CPU_DoubleU u1, u2;
3703
    u1.ll = op1;
3704
    u2.ll = op2;
3705
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3706
}
3707

    
3708
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3709
{
3710
    CPU_DoubleU u1, u2;
3711
    u1.ll = op1;
3712
    u2.ll = op2;
3713
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3714
}
3715

    
3716
/* The efdcmp* compares should eventually differ from the efdtst* tests
 * in their treatment of special operands; for now they are aliases. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3733

    
3734
/*****************************************************************************/
3735
/* Softmmu support */
3736
#if !defined (CONFIG_USER_ONLY)
3737

    
3738
#define MMUSUFFIX _mmu
3739

    
3740
#define SHIFT 0
3741
#include "softmmu_template.h"
3742

    
3743
#define SHIFT 1
3744
#include "softmmu_template.h"
3745

    
3746
#define SHIFT 2
3747
#include "softmmu_template.h"
3748

    
3749
#define SHIFT 3
3750
#include "softmmu_template.h"
3751

    
3752
/* try to fill the TLB and return an exception if error. If retaddr is
3753
   NULL, it means that the function was called in C code (i.e. not
3754
   from generated code or from helper.c) */
3755
/* XXX: fix it to restore all registers */
3756
/* Fill the QEMU TLB after a miss; on failure, raise the guest MMU fault.
 * retaddr == NULL means we were called from C code (not from generated
 * code), in which case no translated-code state restore is attempted. */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* longjmps back to the CPU loop via cpu_loop_exit() */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3783

    
3784
/* Segment registers load and store */
3785
/* mfsr: read a segment register; on MMU models with the 64-bit flag set
 * the SR view is synthesized by ppc_load_sr() instead of read from sr[]. */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}
3793

    
3794
/* mtsr: store a segment register value (delegated to the MMU code). */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3798

    
3799
/* SLB management */
3800
#if defined(TARGET_PPC64)
3801
/* slbmte: install an SLB entry; a rejected operand pair raises a
 * program interrupt with the "invalid instruction" error code. */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}
3807

    
3808
/* slbmfee: read back the ESID half of an SLB entry; an invalid index
 * raises a program interrupt (which does not return here). */
target_ulong helper_load_slb_esid (target_ulong rb)
{
    target_ulong rt;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return rt;
}

/* slbmfev: read back the VSID half of an SLB entry. */
target_ulong helper_load_slb_vsid (target_ulong rb)
{
    target_ulong rt;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return rt;
}
3827

    
3828
/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

/* slbie: invalidate the SLB entry matching the given effective address. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3837

    
3838
#endif /* defined(TARGET_PPC64) */
3839

    
3840
/* TLB management */
3841
/* tlbia: invalidate the whole TLB. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

/* tlbie: invalidate the TLB entry for one effective address. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3850

    
3851
/* Software driven TLBs management */
3852
/* PowerPC 602/603 software TLB load instructions helpers */
3853
/* Load one 602/603 software TLB entry from the SPRs prepared by the
 * miss handler: RPA plus ICMP/IMISS for code, DCMP/DMISS for data. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    /* bit 17 of SRR1 selects the TLB way to replace */
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3875

    
3876
/* tlbld: load a data TLB entry from the miss SPRs. */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

/* tlbli: load an instruction TLB entry from the miss SPRs. */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3885

    
3886
/* PowerPC 74xx software TLB load instructions helpers */
3887
/* Load one 74xx software TLB entry from PTEHI/PTELO/TLBMISS. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    /* the low two bits of TLBMISS select the way; the rest is the EPN */
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3904

    
3905
/* tlbld (74xx): load a data TLB entry. */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

/* tlbli (74xx): load an instruction TLB entry. */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3914

    
3915
static inline target_ulong booke_tlb_to_page_size(int size)
3916
{
3917
    return 1024 << (2 * size);
3918
}
3919

    
3920
/* Inverse of booke_tlb_to_page_size(): map a page size in bytes back to
 * its 4^n KiB TLB size field; returns -1 when the size has no encoding.
 * The encodings above 1 GiB only exist on 64-bit targets. */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
3982

    
3983
/* Helpers for 4xx TLB management */
3984
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */
3985

    
3986
#define PPC4XX_TLBHI_V              0x00000040
3987
#define PPC4XX_TLBHI_E              0x00000020
3988
#define PPC4XX_TLBHI_SIZE_MIN       0
3989
#define PPC4XX_TLBHI_SIZE_MAX       7
3990
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
3991
#define PPC4XX_TLBHI_SIZE_SHIFT     7
3992
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007
3993

    
3994
#define PPC4XX_TLBLO_EX             0x00000200
3995
#define PPC4XX_TLBLO_WR             0x00000100
3996
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
3997
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
3998

    
3999
/* tlbre (TLBHI word): read back EPN, valid bit and size field of a 4xx
 * TLB entry; also loads SPR 40x_PID with the entry's PID, as the read
 * side effect. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        /* sizes with no 4xx encoding are reported as the default field */
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
4019

    
4020
/* tlbre (TLBLO word): read back RPN plus the EX/WR permission bits of a
 * 4xx TLB entry. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
4036

    
4037
/* tlbwe (TLBHI word): write EPN, size and valid bit of a 4xx TLB entry.
 * The pages covered by the entry are flushed both before (old mapping)
 * and after (new mapping) the update.
 *
 * Change from the previous version: the cpu_abort() diagnostic now
 * extracts the size field with the PPC4XX_TLBHI_SIZE_* macros instead of
 * duplicating them as magic numbers ((val >> 7) & 0x7). */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE,
                  (int)((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                        & PPC4XX_TLBHI_SIZE_MASK));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
4095

    
4096
/* tlbwe (TLBLO word): write RPN, attributes and permissions of a 4xx
 * TLB entry. Entries are always readable; EX/WR add exec/write. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4121

    
4122
/* tlbsx: search the 4xx TLB for an entry matching the effective address
 * under the current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4126

    
4127
/* PowerPC 440 TLB management */
4128
/* PowerPC 440 TLB management */

/* tlbwe: write one of the three words of a 440 TLB entry. Word 0 holds
 * EPN/size/valid/attr bit 0, word 1 the RPN, word 2 attributes and the
 * six user/supervisor permission bits. Affected mappings are flushed. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        /* flush if a valid entry's EPN, size or validity changes */
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* PID comes from the low byte of MMUCR at write time */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        /* bits 0-2: user R/W/X (stored shifted by 4); bits 3-5: supervisor */
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
4189

    
4190
/* tlbre: read back one of the three words of a 440 TLB entry; reading
 * word 0 also loads the entry's PID into the low byte of MMUCR. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        /* permission bits mirror the encoding written by tlbwe word 2 */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
4235

    
4236
/* tlbsx: search the 440 TLB for an entry matching the effective address
 * under the PID held in the low byte of MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4240

    
4241
#endif /* !CONFIG_USER_ONLY */