target-ppc/op_helper.c @ 5e1d0985
/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* Registers load and stores */
target_ulong helper_load_cr (void)
{
    return (env->crf[0] << 28) |
           (env->crf[1] << 24) |
           (env->crf[2] << 20) |
           (env->crf[3] << 16) |
           (env->crf[4] << 12) |
           (env->crf[5] << 8) |
           (env->crf[6] << 4) |
           (env->crf[7] << 0);
}

void helper_store_cr (target_ulong val, uint32_t mask)
{
    int i, sh;

    for (i = 0, sh = 7; i < 8; i++, sh--) {
        if (mask & (1 << sh))
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
    }
}

/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

void helper_store_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        if (loglevel != 0) {
            fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
        }
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif

/*****************************************************************************/
/* Memory load and stores */

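/* Note: addr_add() computes the successive effective addresses used by the
 * load/store string and multiple helpers below; on a 64-bit CPU running
 * with MSR[SF] clear the sum wraps to 32 bits, otherwise the full 64-bit
 * address is kept. */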
static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
        if (!msr_sf)
            return (uint32_t)(addr + arg);
        else
#endif
            return addr + arg;
}

void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
/* The PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}

void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

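/* Note: do_dcbz() zeroes the naturally aligned cache line containing addr,
 * one 32-bit word at a time, and clears any pending lwarx/ldarx
 * reservation (env->reserve) recorded at that line address. */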
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    addr &= ~(dcache_line_size - 1);
    int i;
    for (i = 0 ; i < dcache_line_size ; i += 4) {
        stl(addr + i , 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
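/* Note (applies to sraw and srad below): XER[CA] is set only when the
 * shifted value is negative and at least one 1 bit is shifted out, i.e.
 * when the algebraic shift result is inexact; otherwise it is cleared. */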
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}

#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

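/* helper_popcntb: classic SWAR reduction; after the three steps below each
 * byte of the result holds the population count of the corresponding byte
 * of the source operand. */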
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

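/* FPRF field layout used below: bit 4 is C (class descriptor), bit 3 is
 * FL (less than / negative), bit 2 is FG (greater than / positive),
 * bit 1 is FE (equal / zero) and bit 0 is FU (unordered / NaN). */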
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}

/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}

static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}

void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif

/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
{
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
}
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}

#endif

static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}

/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte  - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}

void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}

#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                    target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0xFFFF0000), 0);
}
#endif
#endif

void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}

target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1749
{
1750
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1751

    
1752
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1753
        (int32_t)arg2 == 0) {
1754
        env->spr[SPR_MQ] = 0;
1755
        return INT32_MIN;
1756
    } else {
1757
        env->spr[SPR_MQ] = tmp % arg2;
1758
        return  tmp / (int32_t)arg2;
1759
    }
1760
}
1761

    
1762
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1763
{
1764
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1765

    
1766
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1767
        (int32_t)arg2 == 0) {
1768
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
1769
        env->spr[SPR_MQ] = 0;
1770
        return INT32_MIN;
1771
    } else {
1772
        env->spr[SPR_MQ] = tmp % arg2;
1773
        tmp /= (int32_t)arg2;
1774
        if ((int32_t)tmp != tmp) {
1775
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
1776
        } else {
1777
            env->xer &= ~(1 << XER_OV);
1778
        }
1779
        return tmp;
1780
    }
1781
}
1782

    
1783
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1784
{
1785
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1786
        (int32_t)arg2 == 0) {
1787
        env->spr[SPR_MQ] = 0;
1788
        return INT32_MIN;
1789
    } else {
1790
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1791
        return (int32_t)arg1 / (int32_t)arg2;
1792
    }
1793
}
1794

    
1795
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1796
{
1797
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1798
        (int32_t)arg2 == 0) {
1799
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
1800
        env->spr[SPR_MQ] = 0;
1801
        return INT32_MIN;
1802
    } else {
1803
        env->xer &= ~(1 << XER_OV);
1804
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1805
        return (int32_t)arg1 / (int32_t)arg2;
1806
    }
1807
}
1808

    
1809
#if !defined (CONFIG_USER_ONLY)
1810
target_ulong helper_rac (target_ulong addr)
1811
{
1812
    mmu_ctx_t ctx;
1813
    int nb_BATs;
1814
    target_ulong ret = 0;
1815

    
1816
    /* We don't have to generate many instances of this instruction,
1817
     * as rac is supervisor only.
1818
     */
1819
    /* XXX: FIX THIS: Pretend we have no BAT */
1820
    nb_BATs = env->nb_BATs;
1821
    env->nb_BATs = 0;
1822
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1823
        ret = ctx.raddr;
1824
    env->nb_BATs = nb_BATs;
1825
    return ret;
1826
}
1827

    
1828
void helper_rfsvc (void)
1829
{
1830
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1831
}
1832
#endif
1833

    
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same here. */
/* Extremely decomposed:
 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
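
/* Illustrative sketch (not part of the build): a table like mfrom_table.c
 * could be generated offline from the formula in the comment above, e.g.:
 *
 *   #include <math.h>
 *   #include <stdio.h>
 *   int main (void)
 *   {
 *       int i;
 *       for (i = 0; i < 602; i++)
 *           printf("%5d,\n",
 *                  (int)(256.0 * log10(pow(10.0, -i / 256.0) + 1.0) + 0.5));
 *       return 0;
 *   }
 *
 * For arg == 0 the formula gives 256 * log10(2) + 0.5, i.e. 77.
 */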
1853

    
1854
/*****************************************************************************/
1855
/* Embedded PowerPC specific helpers */
1856

    
1857
/* XXX: to be improved to check access rights when in user-mode */
1858
target_ulong helper_load_dcr (target_ulong dcrn)
1859
{
1860
    target_ulong val = 0;
1861

    
1862
    if (unlikely(env->dcr_env == NULL)) {
1863
        if (loglevel != 0) {
1864
            fprintf(logfile, "No DCR environment\n");
1865
        }
1866
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1867
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1868
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1869
        if (loglevel != 0) {
1870
            fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1871
        }
1872
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1873
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1874
    }
1875
    return val;
1876
}
1877

    
1878
void helper_store_dcr (target_ulong dcrn, target_ulong val)
1879
{
1880
    if (unlikely(env->dcr_env == NULL)) {
1881
        if (loglevel != 0) {
1882
            fprintf(logfile, "No DCR environment\n");
1883
        }
1884
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1885
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1886
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1887
        if (loglevel != 0) {
1888
            fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
1889
        }
1890
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1891
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1892
    }
1893
}
1894

    
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif

/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
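
/* For illustration: dlmzb scans the eight bytes of high:low starting from
 * the most significant byte of 'high' and returns the 1-based index of the
 * first zero byte, which is also written to the low bits of XER;
 * e.g. high = 0x41424300 ("ABC\0") gives 4.
 */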
1956

    
1957
/*****************************************************************************/
1958
/* Altivec extension helpers */
1959
#if defined(WORDS_BIGENDIAN)
1960
#define HI_IDX 0
1961
#define LO_IDX 1
1962
#else
1963
#define HI_IDX 1
1964
#define LO_IDX 0
1965
#endif
1966

    
1967
#if defined(WORDS_BIGENDIAN)
1968
#define VECTOR_FOR_INORDER_I(index, element)            \
1969
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
1970
#else
1971
#define VECTOR_FOR_INORDER_I(index, element)            \
1972
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1973
#endif
1974

    
1975
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
1976
{
1977
    int i, j = (sh & 0xf);
1978

    
1979
    VECTOR_FOR_INORDER_I (i, u8) {
1980
        r->u8[i] = j++;
1981
    }
1982
}
1983

    
1984
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
1985
{
1986
    int i, j = 0x10 - (sh & 0xf);
1987

    
1988
    VECTOR_FOR_INORDER_I (i, u8) {
1989
        r->u8[i] = j++;
1990
    }
1991
}
1992

    
1993
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1994
{
1995
    int i;
1996
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1997
        r->u32[i] = ~a->u32[i] < b->u32[i];
1998
    }
1999
}
2000

    
2001
#define VARITH_DO(name, op, element)        \
2002
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
2003
{                                                                       \
2004
    int i;                                                              \
2005
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
2006
        r->element[i] = a->element[i] op b->element[i];                 \
2007
    }                                                                   \
2008
}
2009
#define VARITH(suffix, element)                  \
2010
  VARITH_DO(add##suffix, +, element)             \
2011
  VARITH_DO(sub##suffix, -, element)
2012
VARITH(ubm, u8)
2013
VARITH(uhm, u16)
2014
VARITH(uwm, u32)
2015
#undef VARITH_DO
2016
#undef VARITH
2017

    
2018
#define VAVG_DO(name, element, etype)                                   \
2019
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2020
    {                                                                   \
2021
        int i;                                                          \
2022
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2023
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
2024
            r->element[i] = x >> 1;                                     \
2025
        }                                                               \
2026
    }
2027

    
2028
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2029
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
2030
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2031
VAVG(b, s8, int16_t, u8, uint16_t)
2032
VAVG(h, s16, int32_t, u16, uint32_t)
2033
VAVG(w, s32, int64_t, u32, uint64_t)
2034
#undef VAVG_DO
2035
#undef VAVG
2036

    
2037
#define VMINMAX_DO(name, compare, element)                              \
2038
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2039
    {                                                                   \
2040
        int i;                                                          \
2041
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2042
            if (a->element[i] compare b->element[i]) {                  \
2043
                r->element[i] = b->element[i];                          \
2044
            } else {                                                    \
2045
                r->element[i] = a->element[i];                          \
2046
            }                                                           \
2047
        }                                                               \
2048
    }
2049
#define VMINMAX(suffix, element)                \
2050
  VMINMAX_DO(min##suffix, >, element)           \
2051
  VMINMAX_DO(max##suffix, <, element)
2052
VMINMAX(sb, s8)
2053
VMINMAX(sh, s16)
2054
VMINMAX(sw, s32)
2055
VMINMAX(ub, u8)
2056
VMINMAX(uh, u16)
2057
VMINMAX(uw, u32)
2058
#undef VMINMAX_DO
2059
#undef VMINMAX
2060

    
2061
#define VMRG_DO(name, element, highp)                                   \
2062
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2063
    {                                                                   \
2064
        ppc_avr_t result;                                               \
2065
        int i;                                                          \
2066
        size_t n_elems = ARRAY_SIZE(r->element);                        \
2067
        for (i = 0; i < n_elems/2; i++) {                               \
2068
            if (highp) {                                                \
2069
                result.element[i*2+HI_IDX] = a->element[i];             \
2070
                result.element[i*2+LO_IDX] = b->element[i];             \
2071
            } else {                                                    \
2072
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2073
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2074
            }                                                           \
2075
        }                                                               \
2076
        *r = result;                                                    \
2077
    }
#if defined(WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2095

    
2096
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
2097
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2098
    {                                                                   \
2099
        int i;                                                          \
2100
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
2101
            if (evenp) {                                                \
2102
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2103
            } else {                                                    \
2104
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2105
            }                                                           \
2106
        }                                                               \
2107
    }
2108
#define VMUL(suffix, mul_element, prod_element) \
2109
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2110
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2111
VMUL(sb, s8, s16)
2112
VMUL(sh, s16, s32)
2113
VMUL(ub, u8, u16)
2114
VMUL(uh, u16, u32)
2115
#undef VMUL_DO
2116
#undef VMUL
2117

    
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE
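
/* Note on the shift mask used by the rotate/shift helpers: for u8 elements
 * sizeof(element[0]) is 1, so the mask is (1 << 3) - 1 = 7; for u16 it is
 * (1 << 4) - 1 = 15 and for u32 (1 << 5) - 1 = 31, i.e. the count taken
 * from b is always reduced modulo the element width in bits.
 */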
2132

    
2133
#define VSL(suffix, element)                                            \
2134
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2135
    {                                                                   \
2136
        int i;                                                          \
2137
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2138
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2139
            unsigned int shift = b->element[i] & mask;                  \
2140
            r->element[i] = a->element[i] << shift;                     \
2141
        }                                                               \
2142
    }
2143
VSL(b, u8)
2144
VSL(h, u16)
2145
VSL(w, u32)
2146
#undef VSL
2147

    
2148
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2149
{
2150
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2151

    
2152
#if defined (WORDS_BIGENDIAN)
2153
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2154
  memset (&r->u8[16-sh], 0, sh);
2155
#else
2156
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2157
  memset (&r->u8[0], 0, sh);
2158
#endif
2159
}
2160

    
2161
#define VSR(suffix, element)                                            \
2162
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2163
    {                                                                   \
2164
        int i;                                                          \
2165
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2166
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2167
            unsigned int shift = b->element[i] & mask;                  \
2168
            r->element[i] = a->element[i] >> shift;                     \
2169
        }                                                               \
2170
    }
2171
VSR(ab, s8)
2172
VSR(ah, s16)
2173
VSR(aw, s32)
2174
VSR(b, u8)
2175
VSR(h, u16)
2176
VSR(w, u32)
2177
#undef VSR
2178

    
2179
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2180
{
2181
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2182

    
2183
#if defined (WORDS_BIGENDIAN)
2184
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2185
  memset (&r->u8[0], 0, sh);
2186
#else
2187
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2188
  memset (&r->u8[16-sh], 0, sh);
2189
#endif
2190
}
2191

    
2192
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2193
{
2194
    int i;
2195
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2196
        r->u32[i] = a->u32[i] >= b->u32[i];
2197
    }
2198
}
2199

    
2200
#undef VECTOR_FOR_INORDER_I
2201
#undef HI_IDX
2202
#undef LO_IDX
2203

    
/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static always_inline uint8_t byte_reverse (uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static always_inline uint32_t word_reverse (uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}

#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
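
/* Illustration (not authoritative): brinc performs a bit-reversed increment
 * over the bits selected by arg2.  With arg2 = 0xF the low four bits step
 * through 0x0, 0x8, 0x4, 0xC, 0x2, ... in bit-reversed order.
 * E.g. brinc(0x4, 0xF):
 *   a | ~b          = 0xFFFFFFF4
 *   word_reverse(.) = 0x2FFFFFFF, + 1 = 0x30000000
 *   word_reverse(.) = 0x0000000C, masked with b -> result 0xC
 */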

uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}
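
/* For illustration: cntlsw32 counts leading bits equal to the sign bit, so
 * both 0xFFFF0000 and 0x0000FFFF give 16, while cntlzw32 below counts
 * leading zeros only (0x0000FFFF -> 16, 0xFFFF0000 -> 0).
 */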
2242

    
2243
uint32_t helper_cntlzw32 (uint32_t val)
2244
{
2245
    return clz32(val);
2246
}
2247

    
2248
/* Single-precision floating-point conversions */
2249
static always_inline uint32_t efscfsi (uint32_t val)
2250
{
2251
    CPU_FloatU u;
2252

    
2253
    u.f = int32_to_float32(val, &env->spe_status);
2254

    
2255
    return u.l;
2256
}
2257

    
2258
static always_inline uint32_t efscfui (uint32_t val)
2259
{
2260
    CPU_FloatU u;
2261

    
2262
    u.f = uint32_to_float32(val, &env->spe_status);
2263

    
2264
    return u.l;
2265
}
2266

    
2267
static always_inline int32_t efsctsi (uint32_t val)
2268
{
2269
    CPU_FloatU u;
2270

    
2271
    u.l = val;
2272
    /* NaN are not treated the same way IEEE 754 does */
2273
    if (unlikely(float32_is_nan(u.f)))
2274
        return 0;
2275

    
2276
    return float32_to_int32(u.f, &env->spe_status);
2277
}
2278

    
2279
static always_inline uint32_t efsctui (uint32_t val)
2280
{
2281
    CPU_FloatU u;
2282

    
2283
    u.l = val;
2284
    /* NaN are not treated the same way IEEE 754 does */
2285
    if (unlikely(float32_is_nan(u.f)))
2286
        return 0;
2287

    
2288
    return float32_to_uint32(u.f, &env->spe_status);
2289
}
2290

    
2291
static always_inline uint32_t efsctsiz (uint32_t val)
2292
{
2293
    CPU_FloatU u;
2294

    
2295
    u.l = val;
2296
    /* NaN are not treated the same way IEEE 754 does */
2297
    if (unlikely(float32_is_nan(u.f)))
2298
        return 0;
2299

    
2300
    return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2301
}
2302

    
2303
static always_inline uint32_t efsctuiz (uint32_t val)
2304
{
2305
    CPU_FloatU u;
2306

    
2307
    u.l = val;
2308
    /* NaN are not treated the same way IEEE 754 does */
2309
    if (unlikely(float32_is_nan(u.f)))
2310
        return 0;
2311

    
2312
    return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2313
}
2314

    
static always_inline uint32_t efscfsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->spe_status);
    tmp = int64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_div(u.f, tmp, &env->spe_status);

    return u.l;
}

static always_inline uint32_t efscfuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->spe_status);
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_div(u.f, tmp, &env->spe_status);

    return u.l;
}
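
/* Note (descriptive only): the fractional conversions above treat the 32-bit
 * operand as a fixed-point fraction and simply scale by 2^32, i.e. they do
 * an integer-to-float conversion followed by a division by 2^32; the
 * efsctsf/efsctuf helpers below perform the reverse multiplication before
 * converting back to an integer.
 */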
2338

    
2339
static always_inline uint32_t efsctsf (uint32_t val)
2340
{
2341
    CPU_FloatU u;
2342
    float32 tmp;
2343

    
2344
    u.l = val;
2345
    /* NaN are not treated the same way IEEE 754 does */
2346
    if (unlikely(float32_is_nan(u.f)))
2347
        return 0;
2348
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2349
    u.f = float32_mul(u.f, tmp, &env->spe_status);
2350

    
2351
    return float32_to_int32(u.f, &env->spe_status);
2352
}
2353

    
2354
static always_inline uint32_t efsctuf (uint32_t val)
2355
{
2356
    CPU_FloatU u;
2357
    float32 tmp;
2358

    
2359
    u.l = val;
2360
    /* NaN are not treated the same way IEEE 754 does */
2361
    if (unlikely(float32_is_nan(u.f)))
2362
        return 0;
2363
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2364
    u.f = float32_mul(u.f, tmp, &env->spe_status);
2365

    
2366
    return float32_to_uint32(u.f, &env->spe_status);
2367
}
2368

    
2369
#define HELPER_SPE_SINGLE_CONV(name)                                          \
2370
uint32_t helper_e##name (uint32_t val)                                        \
2371
{                                                                             \
2372
    return e##name(val);                                                      \
2373
}
2374
/* efscfsi */
2375
HELPER_SPE_SINGLE_CONV(fscfsi);
2376
/* efscfui */
2377
HELPER_SPE_SINGLE_CONV(fscfui);
2378
/* efscfuf */
2379
HELPER_SPE_SINGLE_CONV(fscfuf);
2380
/* efscfsf */
2381
HELPER_SPE_SINGLE_CONV(fscfsf);
2382
/* efsctsi */
2383
HELPER_SPE_SINGLE_CONV(fsctsi);
2384
/* efsctui */
2385
HELPER_SPE_SINGLE_CONV(fsctui);
2386
/* efsctsiz */
2387
HELPER_SPE_SINGLE_CONV(fsctsiz);
2388
/* efsctuiz */
2389
HELPER_SPE_SINGLE_CONV(fsctuiz);
2390
/* efsctsf */
2391
HELPER_SPE_SINGLE_CONV(fsctsf);
2392
/* efsctuf */
2393
HELPER_SPE_SINGLE_CONV(fsctuf);
2394

    
2395
#define HELPER_SPE_VECTOR_CONV(name)                                          \
2396
uint64_t helper_ev##name (uint64_t val)                                       \
2397
{                                                                             \
2398
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
2399
            (uint64_t)e##name(val);                                           \
2400
}
2401
/* evfscfsi */
2402
HELPER_SPE_VECTOR_CONV(fscfsi);
2403
/* evfscfui */
2404
HELPER_SPE_VECTOR_CONV(fscfui);
2405
/* evfscfuf */
2406
HELPER_SPE_VECTOR_CONV(fscfuf);
2407
/* evfscfsf */
2408
HELPER_SPE_VECTOR_CONV(fscfsf);
2409
/* evfsctsi */
2410
HELPER_SPE_VECTOR_CONV(fsctsi);
2411
/* evfsctui */
2412
HELPER_SPE_VECTOR_CONV(fsctui);
2413
/* evfsctsiz */
2414
HELPER_SPE_VECTOR_CONV(fsctsiz);
2415
/* evfsctuiz */
2416
HELPER_SPE_VECTOR_CONV(fsctuiz);
2417
/* evfsctsf */
2418
HELPER_SPE_VECTOR_CONV(fsctsf);
2419
/* evfsctuf */
2420
HELPER_SPE_VECTOR_CONV(fsctuf);
2421

    
2422
/* Single-precision floating-point arithmetic */
2423
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
2424
{
2425
    CPU_FloatU u1, u2;
2426
    u1.l = op1;
2427
    u2.l = op2;
2428
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
2429
    return u1.l;
2430
}
2431

    
2432
static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
2433
{
2434
    CPU_FloatU u1, u2;
2435
    u1.l = op1;
2436
    u2.l = op2;
2437
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
2438
    return u1.l;
2439
}
2440

    
2441
static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
2442
{
2443
    CPU_FloatU u1, u2;
2444
    u1.l = op1;
2445
    u2.l = op2;
2446
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
2447
    return u1.l;
2448
}
2449

    
2450
static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
2451
{
2452
    CPU_FloatU u1, u2;
2453
    u1.l = op1;
2454
    u2.l = op2;
2455
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
2456
    return u1.l;
2457
}
2458

    
2459
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
2460
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
2461
{                                                                             \
2462
    return e##name(op1, op2);                                                 \
2463
}
2464
/* efsadd */
2465
HELPER_SPE_SINGLE_ARITH(fsadd);
2466
/* efssub */
2467
HELPER_SPE_SINGLE_ARITH(fssub);
2468
/* efsmul */
2469
HELPER_SPE_SINGLE_ARITH(fsmul);
2470
/* efsdiv */
2471
HELPER_SPE_SINGLE_ARITH(fsdiv);
2472

    
2473
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
2474
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
2475
{                                                                             \
2476
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
2477
            (uint64_t)e##name(op1, op2);                                      \
2478
}
2479
/* evfsadd */
2480
HELPER_SPE_VECTOR_ARITH(fsadd);
2481
/* evfssub */
2482
HELPER_SPE_VECTOR_ARITH(fssub);
2483
/* evfsmul */
2484
HELPER_SPE_VECTOR_ARITH(fsmul);
2485
/* evfsdiv */
2486
HELPER_SPE_VECTOR_ARITH(fsdiv);
2487

    
2488
/* Single-precision floating-point comparisons */
2489
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2490
{
2491
    CPU_FloatU u1, u2;
2492
    u1.l = op1;
2493
    u2.l = op2;
2494
    return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2495
}
2496

    
2497
static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2498
{
2499
    CPU_FloatU u1, u2;
2500
    u1.l = op1;
2501
    u2.l = op2;
2502
    return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2503
}
2504

    
2505
static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2506
{
2507
    CPU_FloatU u1, u2;
2508
    u1.l = op1;
2509
    u2.l = op2;
2510
    return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2511
}
2512

    
2513
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
2514
{
2515
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2516
    return efststlt(op1, op2);
2517
}
2518

    
2519
static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
2520
{
2521
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2522
    return efststgt(op1, op2);
2523
}
2524

    
2525
static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
2526
{
2527
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2528
    return efststeq(op1, op2);
2529
}
2530

    
2531
#define HELPER_SINGLE_SPE_CMP(name)                                           \
2532
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
2533
{                                                                             \
2534
    return e##name(op1, op2) << 2;                                            \
2535
}
2536
/* efststlt */
2537
HELPER_SINGLE_SPE_CMP(fststlt);
2538
/* efststgt */
2539
HELPER_SINGLE_SPE_CMP(fststgt);
2540
/* efststeq */
2541
HELPER_SINGLE_SPE_CMP(fststeq);
2542
/* efscmplt */
2543
HELPER_SINGLE_SPE_CMP(fscmplt);
2544
/* efscmpgt */
2545
HELPER_SINGLE_SPE_CMP(fscmpgt);
2546
/* efscmpeq */
2547
HELPER_SINGLE_SPE_CMP(fscmpeq);
2548

    
static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
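
/* Note: evcmp_merge lays the two per-word results out in the usual SPE
 * vector compare format: bit 3 = high-word result, bit 2 = low-word result,
 * bit 1 = their OR, bit 0 = their AND.
 */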
2553

    
2554
#define HELPER_VECTOR_SPE_CMP(name)                                           \
2555
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
2556
{                                                                             \
2557
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
2558
}
2559
/* evfststlt */
2560
HELPER_VECTOR_SPE_CMP(fststlt);
2561
/* evfststgt */
2562
HELPER_VECTOR_SPE_CMP(fststgt);
2563
/* evfststeq */
2564
HELPER_VECTOR_SPE_CMP(fststeq);
2565
/* evfscmplt */
2566
HELPER_VECTOR_SPE_CMP(fscmplt);
2567
/* evfscmpgt */
2568
HELPER_VECTOR_SPE_CMP(fscmpgt);
2569
/* evfscmpeq */
2570
HELPER_VECTOR_SPE_CMP(fscmpeq);
2571

    
2572
/* Double-precision floating-point conversion */
2573
uint64_t helper_efdcfsi (uint32_t val)
2574
{
2575
    CPU_DoubleU u;
2576

    
2577
    u.d = int32_to_float64(val, &env->spe_status);
2578

    
2579
    return u.ll;
2580
}
2581

    
2582
uint64_t helper_efdcfsid (uint64_t val)
2583
{
2584
    CPU_DoubleU u;
2585

    
2586
    u.d = int64_to_float64(val, &env->spe_status);
2587

    
2588
    return u.ll;
2589
}
2590

    
2591
uint64_t helper_efdcfui (uint32_t val)
2592
{
2593
    CPU_DoubleU u;
2594

    
2595
    u.d = uint32_to_float64(val, &env->spe_status);
2596

    
2597
    return u.ll;
2598
}
2599

    
2600
uint64_t helper_efdcfuid (uint64_t val)
2601
{
2602
    CPU_DoubleU u;
2603

    
2604
    u.d = uint64_to_float64(val, &env->spe_status);
2605

    
2606
    return u.ll;
2607
}
2608

    
2609
uint32_t helper_efdctsi (uint64_t val)
2610
{
2611
    CPU_DoubleU u;
2612

    
2613
    u.ll = val;
2614
    /* NaN are not treated the same way IEEE 754 does */
2615
    if (unlikely(float64_is_nan(u.d)))
2616
        return 0;
2617

    
2618
    return float64_to_int32(u.d, &env->spe_status);
2619
}
2620

    
2621
uint32_t helper_efdctui (uint64_t val)
2622
{
2623
    CPU_DoubleU u;
2624

    
2625
    u.ll = val;
2626
    /* NaN are not treated the same way IEEE 754 does */
2627
    if (unlikely(float64_is_nan(u.d)))
2628
        return 0;
2629

    
2630
    return float64_to_uint32(u.d, &env->spe_status);
2631
}
2632

    
2633
uint32_t helper_efdctsiz (uint64_t val)
2634
{
2635
    CPU_DoubleU u;
2636

    
2637
    u.ll = val;
2638
    /* NaN are not treated the same way IEEE 754 does */
2639
    if (unlikely(float64_is_nan(u.d)))
2640
        return 0;
2641

    
2642
    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2643
}
2644

    
2645
uint64_t helper_efdctsidz (uint64_t val)
2646
{
2647
    CPU_DoubleU u;
2648

    
2649
    u.ll = val;
2650
    /* NaN are not treated the same way IEEE 754 does */
2651
    if (unlikely(float64_is_nan(u.d)))
2652
        return 0;
2653

    
2654
    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2655
}
2656

    
2657
uint32_t helper_efdctuiz (uint64_t val)
2658
{
2659
    CPU_DoubleU u;
2660

    
2661
    u.ll = val;
2662
    /* NaN are not treated the same way IEEE 754 does */
2663
    if (unlikely(float64_is_nan(u.d)))
2664
        return 0;
2665

    
2666
    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2667
}
2668

    
2669
uint64_t helper_efdctuidz (uint64_t val)
2670
{
2671
    CPU_DoubleU u;
2672

    
2673
    u.ll = val;
2674
    /* NaN are not treated the same way IEEE 754 does */
2675
    if (unlikely(float64_is_nan(u.d)))
2676
        return 0;
2677

    
2678
    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2679
}
2680

    
2681
uint64_t helper_efdcfsf (uint32_t val)
2682
{
2683
    CPU_DoubleU u;
2684
    float64 tmp;
2685

    
2686
    u.d = int32_to_float64(val, &env->spe_status);
2687
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2688
    u.d = float64_div(u.d, tmp, &env->spe_status);
2689

    
2690
    return u.ll;
2691
}
2692

    
2693
uint64_t helper_efdcfuf (uint32_t val)
2694
{
2695
    CPU_DoubleU u;
2696
    float64 tmp;
2697

    
2698
    u.d = uint32_to_float64(val, &env->spe_status);
2699
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2700
    u.d = float64_div(u.d, tmp, &env->spe_status);
2701

    
2702
    return u.ll;
2703
}
2704

    
2705
uint32_t helper_efdctsf (uint64_t val)
2706
{
2707
    CPU_DoubleU u;
2708
    float64 tmp;
2709

    
2710
    u.ll = val;
2711
    /* NaN are not treated the same way IEEE 754 does */
2712
    if (unlikely(float64_is_nan(u.d)))
2713
        return 0;
2714
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2715
    u.d = float64_mul(u.d, tmp, &env->spe_status);
2716

    
2717
    return float64_to_int32(u.d, &env->spe_status);
2718
}
2719

    
2720
uint32_t helper_efdctuf (uint64_t val)
2721
{
2722
    CPU_DoubleU u;
2723
    float64 tmp;
2724

    
2725
    u.ll = val;
2726
    /* NaN are not treated the same way IEEE 754 does */
2727
    if (unlikely(float64_is_nan(u.d)))
2728
        return 0;
2729
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2730
    u.d = float64_mul(u.d, tmp, &env->spe_status);
2731

    
2732
    return float64_to_uint32(u.d, &env->spe_status);
2733
}
2734

    
2735
uint32_t helper_efscfd (uint64_t val)
2736
{
2737
    CPU_DoubleU u1;
2738
    CPU_FloatU u2;
2739

    
2740
    u1.ll = val;
2741
    u2.f = float64_to_float32(u1.d, &env->spe_status);
2742

    
2743
    return u2.l;
2744
}
2745

    
2746
uint64_t helper_efdcfs (uint32_t val)
2747
{
2748
    CPU_DoubleU u2;
2749
    CPU_FloatU u1;
2750

    
2751
    u1.l = val;
2752
    u2.d = float32_to_float64(u1.f, &env->spe_status);
2753

    
2754
    return u2.ll;
2755
}
2756

    
2757
/* Double precision fixed-point arithmetic */
2758
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
2759
{
2760
    CPU_DoubleU u1, u2;
2761
    u1.ll = op1;
2762
    u2.ll = op2;
2763
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
2764
    return u1.ll;
2765
}
2766

    
2767
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
2768
{
2769
    CPU_DoubleU u1, u2;
2770
    u1.ll = op1;
2771
    u2.ll = op2;
2772
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
2773
    return u1.ll;
2774
}
2775

    
2776
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
2777
{
2778
    CPU_DoubleU u1, u2;
2779
    u1.ll = op1;
2780
    u2.ll = op2;
2781
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
2782
    return u1.ll;
2783
}
2784

    
2785
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
2786
{
2787
    CPU_DoubleU u1, u2;
2788
    u1.ll = op1;
2789
    u2.ll = op2;
2790
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
2791
    return u1.ll;
2792
}
2793

    
2794
/* Double precision floating point helpers */
2795
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
2796
{
2797
    CPU_DoubleU u1, u2;
2798
    u1.ll = op1;
2799
    u2.ll = op2;
2800
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2801
}
2802

    
2803
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
2804
{
2805
    CPU_DoubleU u1, u2;
2806
    u1.ll = op1;
2807
    u2.ll = op2;
2808
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
2809
}
2810

    
2811
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
2812
{
2813
    CPU_DoubleU u1, u2;
2814
    u1.ll = op1;
2815
    u2.ll = op2;
2816
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2817
}
2818

    
2819
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
2820
{
2821
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2822
    return helper_efdtstlt(op1, op2);
2823
}
2824

    
2825
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
2826
{
2827
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2828
    return helper_efdtstgt(op1, op2);
2829
}
2830

    
2831
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
2832
{
2833
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2834
    return helper_efdtsteq(op1, op2);
2835
}
2836

    
2837
/*****************************************************************************/
2838
/* Softmmu support */
2839
#if !defined (CONFIG_USER_ONLY)
2840

    
2841
#define MMUSUFFIX _mmu
2842

    
2843
#define SHIFT 0
2844
#include "softmmu_template.h"
2845

    
2846
#define SHIFT 1
2847
#include "softmmu_template.h"
2848

    
2849
#define SHIFT 2
2850
#include "softmmu_template.h"
2851

    
2852
#define SHIFT 3
2853
#include "softmmu_template.h"
2854

    
2855
/* try to fill the TLB and return an exception if error. If retaddr is
2856
   NULL, it means that the function was called in C code (i.e. not
2857
   from generated code or from helper.c) */
2858
/* XXX: fix it to restore all registers */
2859
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2860
{
2861
    TranslationBlock *tb;
2862
    CPUState *saved_env;
2863
    unsigned long pc;
2864
    int ret;
2865

    
2866
    /* XXX: hack to restore env in all cases, even if not called from
2867
       generated code */
2868
    saved_env = env;
2869
    env = cpu_single_env;
2870
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2871
    if (unlikely(ret != 0)) {
2872
        if (likely(retaddr)) {
2873
            /* now we have a real cpu fault */
2874
            pc = (unsigned long)retaddr;
2875
            tb = tb_find_pc(pc);
2876
            if (likely(tb)) {
2877
                /* the PC is inside the translated code. It means that we have
2878
                   a virtual CPU fault */
2879
                cpu_restore_state(tb, env, pc, NULL);
2880
            }
2881
        }
2882
        helper_raise_exception_err(env->exception_index, env->error_code);
2883
    }
2884
    env = saved_env;
2885
}
2886

    
2887
/* Segment registers load and store */
2888
target_ulong helper_load_sr (target_ulong sr_num)
2889
{
2890
    return env->sr[sr_num];
2891
}
2892

    
2893
void helper_store_sr (target_ulong sr_num, target_ulong val)
2894
{
2895
    ppc_store_sr(env, sr_num, val);
2896
}
2897

    
2898
/* SLB management */
2899
#if defined(TARGET_PPC64)
2900
target_ulong helper_load_slb (target_ulong slb_nr)
2901
{
2902
    return ppc_load_slb(env, slb_nr);
2903
}
2904

    
2905
void helper_store_slb (target_ulong slb_nr, target_ulong rs)
2906
{
2907
    ppc_store_slb(env, slb_nr, rs);
2908
}
2909

    
2910
void helper_slbia (void)
2911
{
2912
    ppc_slb_invalidate_all(env);
2913
}
2914

    
2915
void helper_slbie (target_ulong addr)
2916
{
2917
    ppc_slb_invalidate_one(env, addr);
2918
}
2919

    
2920
#endif /* defined(TARGET_PPC64) */
2921

    
2922
/* TLB management */
2923
void helper_tlbia (void)
2924
{
2925
    ppc_tlb_invalidate_all(env);
2926
}
2927

    
2928
void helper_tlbie (target_ulong addr)
2929
{
2930
    ppc_tlb_invalidate_one(env, addr);
2931
}
2932

    
2933
/* Software driven TLBs management */
2934
/* PowerPC 602/603 software TLB load instructions helpers */
2935
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
2936
{
2937
    target_ulong RPN, CMP, EPN;
2938
    int way;
2939

    
2940
    RPN = env->spr[SPR_RPA];
2941
    if (is_code) {
2942
        CMP = env->spr[SPR_ICMP];
2943
        EPN = env->spr[SPR_IMISS];
2944
    } else {
2945
        CMP = env->spr[SPR_DCMP];
2946
        EPN = env->spr[SPR_DMISS];
2947
    }
2948
    way = (env->spr[SPR_SRR1] >> 17) & 1;
2949
#if defined (DEBUG_SOFTWARE_TLB)
2950
    if (loglevel != 0) {
2951
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
2952
                " PTE1 " ADDRX " way %d\n",
2953
                __func__, new_EPN, EPN, CMP, RPN, way);
2954
    }
2955
#endif
2956
    /* Store this TLB */
2957
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2958
                     way, is_code, CMP, RPN);
2959
}
2960

    
2961
void helper_6xx_tlbd (target_ulong EPN)
2962
{
2963
    do_6xx_tlb(EPN, 0);
2964
}
2965

    
2966
void helper_6xx_tlbi (target_ulong EPN)
2967
{
2968
    do_6xx_tlb(EPN, 1);
2969
}
2970

    
2971
/* PowerPC 74xx software TLB load instructions helpers */
2972
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
2973
{
2974
    target_ulong RPN, CMP, EPN;
2975
    int way;
2976

    
2977
    RPN = env->spr[SPR_PTELO];
2978
    CMP = env->spr[SPR_PTEHI];
2979
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
2980
    way = env->spr[SPR_TLBMISS] & 0x3;
2981
#if defined (DEBUG_SOFTWARE_TLB)
2982
    if (loglevel != 0) {
2983
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
2984
                " PTE1 " ADDRX " way %d\n",
2985
                __func__, new_EPN, EPN, CMP, RPN, way);
2986
    }
2987
#endif
2988
    /* Store this TLB */
2989
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2990
                     way, is_code, CMP, RPN);
2991
}
2992

    
2993
void helper_74xx_tlbd (target_ulong EPN)
2994
{
2995
    do_74xx_tlb(EPN, 0);
2996
}
2997

    
2998
void helper_74xx_tlbi (target_ulong EPN)
2999
{
3000
    do_74xx_tlb(EPN, 1);
3001
}
3002

    
static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return 1024 << (2 * size);
}
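
/* For illustration: this maps the BookE TLB size field to bytes as
 * 1 KiB << (2 * size), i.e. size 0 -> 1 KiB, 1 -> 4 KiB, 2 -> 16 KiB, ...,
 * 9 -> 256 MiB, each step multiplying the page size by four.
 */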
3007

    
3008
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3009
{
3010
    int size;
3011

    
3012
    switch (page_size) {
3013
    case 0x00000400UL:
3014
        size = 0x0;
3015
        break;
3016
    case 0x00001000UL:
3017
        size = 0x1;
3018
        break;
3019
    case 0x00004000UL:
3020
        size = 0x2;
3021
        break;
3022
    case 0x00010000UL:
3023
        size = 0x3;
3024
        break;
3025
    case 0x00040000UL:
3026
        size = 0x4;
3027
        break;
3028
    case 0x00100000UL:
3029
        size = 0x5;
3030
        break;
3031
    case 0x00400000UL:
3032
        size = 0x6;
3033
        break;
3034
    case 0x01000000UL:
3035
        size = 0x7;
3036
        break;
3037
    case 0x04000000UL:
3038
        size = 0x8;
3039
        break;
3040
    case 0x10000000UL:
3041
        size = 0x9;
3042
        break;
3043
    case 0x40000000UL:
3044
        size = 0xA;
3045
        break;
3046
#if defined (TARGET_PPC64)
3047
    case 0x000100000000ULL:
3048
        size = 0xB;
3049
        break;
3050
    case 0x000400000000ULL:
3051
        size = 0xC;
3052
        break;
3053
    case 0x001000000000ULL:
3054
        size = 0xD;
3055
        break;
3056
    case 0x004000000000ULL:
3057
        size = 0xE;
3058
        break;
3059
    case 0x010000000000ULL:
3060
        size = 0xF;
3061
        break;
3062
#endif
3063
    default:
3064
        size = -1;
3065
        break;
3066
    }
3067

    
3068
    return size;
3069
}
3070

    
3071
/* Helpers for 4xx TLB management */
3072
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3073
{
3074
    ppcemb_tlb_t *tlb;
3075
    target_ulong ret;
3076
    int size;
3077

    
3078
    entry &= 0x3F;
3079
    tlb = &env->tlb[entry].tlbe;
3080
    ret = tlb->EPN;
3081
    if (tlb->prot & PAGE_VALID)
3082
        ret |= 0x400;
3083
    size = booke_page_size_to_tlb(tlb->size);
3084
    if (size < 0 || size > 0x7)
3085
        size = 1;
3086
    ret |= size << 7;
3087
    env->spr[SPR_40x_PID] = tlb->PID;
3088
    return ret;
3089
}
3090

    
3091
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3092
{
3093
    ppcemb_tlb_t *tlb;
3094
    target_ulong ret;
3095

    
3096
    entry &= 0x3F;
3097
    tlb = &env->tlb[entry].tlbe;
3098
    ret = tlb->RPN;
3099
    if (tlb->prot & PAGE_EXEC)
3100
        ret |= 0x200;
3101
    if (tlb->prot & PAGE_WRITE)
3102
        ret |= 0x100;
3103
    return ret;
3104
}
3105

    
3106
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3107
{
3108
    ppcemb_tlb_t *tlb;
3109
    target_ulong page, end;
3110

    
3111
#if defined (DEBUG_SOFTWARE_TLB)
3112
    if (loglevel != 0) {
3113
        fprintf(logfile, "%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3114
    }
3115
#endif
3116
    entry &= 0x3F;
3117
    tlb = &env->tlb[entry].tlbe;
3118
    /* Invalidate previous TLB (if it's valid) */
3119
    if (tlb->prot & PAGE_VALID) {
3120
        end = tlb->EPN + tlb->size;
3121
#if defined (DEBUG_SOFTWARE_TLB)
3122
        if (loglevel != 0) {
3123
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
3124
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3125
        }
3126
#endif
3127
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3128
            tlb_flush_page(env, page);
3129
    }
3130
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3131
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3132
     * If this ever occurs, one should use the ppcemb target instead
3133
     * of the ppc or ppc64 one
3134
     */
3135
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3136
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3137
                  "are not supported (%d)\n",
3138
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3139
    }
3140
    tlb->EPN = val & ~(tlb->size - 1);
3141
    if (val & 0x40)
3142
        tlb->prot |= PAGE_VALID;
3143
    else
3144
        tlb->prot &= ~PAGE_VALID;
3145
    if (val & 0x20) {
3146
        /* XXX: TO BE FIXED */
3147
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
3148
    }
3149
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3150
    tlb->attr = val & 0xFF;
3151
#if defined (DEBUG_SOFTWARE_TLB)
3152
    if (loglevel != 0) {
3153
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3154
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3155
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3156
                tlb->prot & PAGE_READ ? 'r' : '-',
3157
                tlb->prot & PAGE_WRITE ? 'w' : '-',
3158
                tlb->prot & PAGE_EXEC ? 'x' : '-',
3159
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3160
    }
3161
#endif
3162
    /* Invalidate new TLB (if valid) */
3163
    if (tlb->prot & PAGE_VALID) {
3164
        end = tlb->EPN + tlb->size;
3165
#if defined (DEBUG_SOFTWARE_TLB)
3166
        if (loglevel != 0) {
3167
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
3168
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3169
        }
3170
#endif
3171
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3172
            tlb_flush_page(env, page);
3173
    }
3174
}
3175

    
3176
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
3177
{
3178
    ppcemb_tlb_t *tlb;
3179

    
3180
#if defined (DEBUG_SOFTWARE_TLB)
3181
    if (loglevel != 0) {
3182
        fprintf(logfile, "%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
3183
    }
3184
#endif
3185
    entry &= 0x3F;
3186
    tlb = &env->tlb[entry].tlbe;
3187
    tlb->RPN = val & 0xFFFFFC00;
3188
    tlb->prot = PAGE_READ;
3189
    if (val & 0x200)
3190
        tlb->prot |= PAGE_EXEC;
3191
    if (val & 0x100)
3192
        tlb->prot |= PAGE_WRITE;
3193
#if defined (DEBUG_SOFTWARE_TLB)
3194
    if (loglevel != 0) {
3195
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3196
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3197
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3198
                tlb->prot & PAGE_READ ? 'r' : '-',
3199
                tlb->prot & PAGE_WRITE ? 'w' : '-',
3200
                tlb->prot & PAGE_EXEC ? 'x' : '-',
3201
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3202
    }
3203
#endif
3204
}
3205

    
3206
target_ulong helper_4xx_tlbsx (target_ulong address)
3207
{
3208
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
3209
}
3210

    
3211
/* PowerPC 440 TLB management */
3212
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
3213
{
3214
    ppcemb_tlb_t *tlb;
3215
    target_ulong EPN, RPN, size;
3216
    int do_flush_tlbs;
3217

    
3218
#if defined (DEBUG_SOFTWARE_TLB)
3219
    if (loglevel != 0) {
3220
        fprintf(logfile, "%s word %d entry %d value " ADDRX "\n",
3221
                __func__, word, (int)entry, value);
3222
    }
3223
#endif
3224
    do_flush_tlbs = 0;
3225
    entry &= 0x3F;
3226
    tlb = &env->tlb[entry].tlbe;
3227
    switch (word) {
3228
    default:
3229
        /* Just here to please gcc */
3230
    case 0:
3231
        EPN = value & 0xFFFFFC00;
3232
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
3233
            do_flush_tlbs = 1;
3234
        tlb->EPN = EPN;
3235
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
3236
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
3237
            do_flush_tlbs = 1;
3238
        tlb->size = size;
3239
        tlb->attr &= ~0x1;
3240
        tlb->attr |= (value >> 8) & 1;
3241
        if (value & 0x200) {
3242
            tlb->prot |= PAGE_VALID;
3243
        } else {
3244
            if (tlb->prot & PAGE_VALID) {
3245
                tlb->prot &= ~PAGE_VALID;
3246
                do_flush_tlbs = 1;
3247
            }
3248
        }
3249
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
3250
        if (do_flush_tlbs)
3251
            tlb_flush(env, 1);
3252
        break;
3253
    case 1:
3254
        RPN = value & 0xFFFFFC0F;
3255
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
3256
            tlb_flush(env, 1);
3257
        tlb->RPN = RPN;
3258
        break;
3259
    case 2:
3260
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
3261
        tlb->prot = tlb->prot & PAGE_VALID;
3262
        if (value & 0x1)
3263
            tlb->prot |= PAGE_READ << 4;
3264
        if (value & 0x2)
3265
            tlb->prot |= PAGE_WRITE << 4;
3266
        if (value & 0x4)
3267
            tlb->prot |= PAGE_EXEC << 4;
3268
        if (value & 0x8)
3269
            tlb->prot |= PAGE_READ;
3270
        if (value & 0x10)
3271
            tlb->prot |= PAGE_WRITE;
3272
        if (value & 0x20)
3273
            tlb->prot |= PAGE_EXEC;
3274
        break;
3275
    }
3276
}
3277

    
3278
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
3279
{
3280
    ppcemb_tlb_t *tlb;
3281
    target_ulong ret;
3282
    int size;
3283

    
3284
    entry &= 0x3F;
3285
    tlb = &env->tlb[entry].tlbe;
3286
    switch (word) {
3287
    default:
3288
        /* Just here to please gcc */
3289
    case 0:
3290
        ret = tlb->EPN;
3291
        size = booke_page_size_to_tlb(tlb->size);
3292
        if (size < 0 || size > 0xF)
3293
            size = 1;
3294
        ret |= size << 4;
3295
        if (tlb->attr & 0x1)
3296
            ret |= 0x100;
3297
        if (tlb->prot & PAGE_VALID)
3298
            ret |= 0x200;
3299
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
3300
        env->spr[SPR_440_MMUCR] |= tlb->PID;
3301
        break;
3302
    case 1:
3303
        ret = tlb->RPN;
3304
        break;
3305
    case 2:
3306
        ret = tlb->attr & ~0x1;
3307
        if (tlb->prot & (PAGE_READ << 4))
3308
            ret |= 0x1;
3309
        if (tlb->prot & (PAGE_WRITE << 4))
3310
            ret |= 0x2;
3311
        if (tlb->prot & (PAGE_EXEC << 4))
3312
            ret |= 0x4;
3313
        if (tlb->prot & PAGE_READ)
3314
            ret |= 0x8;
3315
        if (tlb->prot & PAGE_WRITE)
3316
            ret |= 0x10;
3317
        if (tlb->prot & PAGE_EXEC)
3318
            ret |= 0x20;
3319
        break;
3320
    }
3321
    return ret;
3322
}
3323

    
3324
target_ulong helper_440_tlbsx (target_ulong address)
3325
{
3326
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
3327
}
3328

    
3329
#endif /* !CONFIG_USER_ONLY */