/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* Registers load and stores */
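/* The condition register is modelled as eight 4-bit fields (env->crf[0..7]),
 * crf[0] being CR0, the most significant nibble of the architected CR.
 */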
target_ulong helper_load_cr (void)
{
    return (env->crf[0] << 28) |
           (env->crf[1] << 24) |
           (env->crf[2] << 20) |
           (env->crf[3] << 16) |
           (env->crf[4] << 12) |
           (env->crf[5] << 8) |
           (env->crf[6] << 4) |
           (env->crf[7] << 0);
}

void helper_store_cr (target_ulong val, uint32_t mask)
{
    int i, sh;

    for (i = 0, sh = 7; i < 8; i++, sh--) {
        if (mask & (1 << sh))
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
    }
}

/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

void helper_store_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        if (loglevel != 0) {
            fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
        }
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif

/*****************************************************************************/
/* Memory load and stores */

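/* Effective address computation: in 32-bit mode (MSR[SF] clear on PPC64),
 * the updated address wraps to 32 bits, which is what the string/multiple
 * load and store helpers below rely on.
 */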
static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
        if (!msr_sf)
            return (uint32_t)(addr + arg);
        else
#endif
            return addr + arg;
}

void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
/* The PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}

void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

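/* dcbz: zero an entire data cache line, 4 bytes at a time, and cancel the
 * lwarx/stwcx. reservation (env->reserve) if it points at this line.
 */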
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    addr &= ~(dcache_line_size - 1);
    int i;
    for (i = 0 ; i < dcache_line_size ; i += 4) {
        stl(addr + i , 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
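/* XER[CA] is set only when the shifted value is negative and at least one
 * 1-bit is shifted out (i.e. the result is not an exact division by 2^n);
 * otherwise CA is cleared.
 */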
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}

#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

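/* popcntb: classic SWAR reduction, but the per-byte sums are kept in place
 * rather than accumulated, so each byte of the result holds the population
 * count of the corresponding byte of the input (e.g. 0x0000FF01 -> 0x00000801).
 */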
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

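/* Compute the 5-bit FPRF class code (C bit plus the FPCC "<, >, =, ?" bits)
 * for a 64-bit operand; optionally store it into FPSCR[FPRF] and return the
 * low 4 bits (FPCC) so the caller can update CR1.
 */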
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}

/* Floating-point invalid operations exception */
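/* Record an invalid-operation exception: set the matching FPSCR[VX*] bit and
 * the VX/FX summary bits, raise a program exception when VE is enabled, and,
 * for the arithmetic cases with VE clear, return the default quiet NaN to be
 * written into the target FPR.
 */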
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}

static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
                /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}

void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif

/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
{
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
}
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}

#endif

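/* Round to integer with an explicit rounding mode: the softfloat rounding
 * mode is temporarily overridden for this operation and then restored from
 * FPSCR[RN] by fpscr_set_rounding_mode().
 */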
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}

/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte  - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* fsel - fsel. */
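/* fsel: return arg2 when arg1 is greater than or equal to zero (including
 * negative zero) and not a NaN, otherwise return arg3.
 */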
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}

void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}

#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

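/* Common return-from-interrupt path: mask the saved MSR with msrm, optionally
 * keep the high 32 bits of the current MSR, clear the two low bits of the
 * saved NIP and force an exit from the current translation block.
 */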
static always_inline void do_rfi (target_ulong nip, target_ulong msr,
1645
                                    target_ulong msrm, int keep_msrh)
1646
{
1647
#if defined(TARGET_PPC64)
1648
    if (msr & (1ULL << MSR_SF)) {
1649
        nip = (uint64_t)nip;
1650
        msr &= (uint64_t)msrm;
1651
    } else {
1652
        nip = (uint32_t)nip;
1653
        msr = (uint32_t)(msr & msrm);
1654
        if (keep_msrh)
1655
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1656
    }
1657
#else
1658
    nip = (uint32_t)nip;
1659
    msr &= (uint32_t)msrm;
1660
#endif
1661
    /* XXX: beware: this is false if VLE is supported */
1662
    env->nip = nip & ~((target_ulong)0x00000003);
1663
    hreg_store_msr(env, msr, 1);
1664
#if defined (DEBUG_OP)
1665
    cpu_dump_rfi(env->nip, env->msr);
1666
#endif
1667
    /* No need to raise an exception here,
1668
     * as rfi is always the last insn of a TB
1669
     */
1670
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1671
}
1672

    
1673
void helper_rfi (void)
1674
{
1675
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1676
           ~((target_ulong)0xFFFF0000), 1);
1677
}
1678

    
1679
#if defined(TARGET_PPC64)
1680
void helper_rfid (void)
1681
{
1682
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1683
           ~((target_ulong)0xFFFF0000), 0);
1684
}
1685

    
1686
void helper_hrfid (void)
1687
{
1688
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1689
           ~((target_ulong)0xFFFF0000), 0);
1690
}
1691
#endif
1692
#endif
1693

    
1694
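/* Conditional trap: the low five flag bits select which comparisons of
 * arg1 and arg2 raise a TRAP program exception (0x10: signed less-than,
 * 0x08: signed greater-than, 0x04: equal, 0x02: unsigned less-than,
 * 0x01: unsigned greater-than).
 */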
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1695
{
1696
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1697
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1698
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1699
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1700
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1701
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1702
    }
1703
}
1704

    
1705
#if defined(TARGET_PPC64)
1706
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1707
{
1708
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1709
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1710
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1711
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1712
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1713
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1714
}
1715
#endif
1716

    
1717
/*****************************************************************************/
1718
/* PowerPC 601 specific instructions (POWER bridge) */
1719

    
1720
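/* clcs returns the cache line size selected by the operand: instruction
 * cache, data cache, or the minimum/maximum of the two; any other selector
 * yields 0.
 */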
target_ulong helper_clcs (uint32_t arg)
1721
{
1722
    switch (arg) {
1723
    case 0x0CUL:
1724
        /* Instruction cache line size */
1725
        return env->icache_line_size;
1726
        break;
1727
    case 0x0DUL:
1728
        /* Data cache line size */
1729
        return env->dcache_line_size;
1730
        break;
1731
    case 0x0EUL:
1732
        /* Minimum cache line size */
1733
        return (env->icache_line_size < env->dcache_line_size) ?
1734
                env->icache_line_size : env->dcache_line_size;
1735
        break;
1736
    case 0x0FUL:
1737
        /* Maximum cache line size */
1738
        return (env->icache_line_size > env->dcache_line_size) ?
1739
                env->icache_line_size : env->dcache_line_size;
1740
        break;
1741
    default:
1742
        /* Undefined */
1743
        return 0;
1744
        break;
1745
    }
1746
}
1747

    
1748
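/* POWER divide: the 64-bit dividend is RA concatenated with MQ; the
 * quotient is returned and the remainder is left in MQ.  Division by zero
 * and INT32_MIN / -1 return INT32_MIN with MQ cleared, and the "o"
 * variants additionally set XER[OV] and XER[SO].
 */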
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1749
{
1750
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1751

    
1752
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1753
        (int32_t)arg2 == 0) {
1754
        env->spr[SPR_MQ] = 0;
1755
        return INT32_MIN;
1756
    } else {
1757
        env->spr[SPR_MQ] = tmp % arg2;
1758
        return  tmp / (int32_t)arg2;
1759
    }
1760
}
1761

    
1762
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1763
{
1764
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1765

    
1766
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1767
        (int32_t)arg2 == 0) {
1768
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
1769
        env->spr[SPR_MQ] = 0;
1770
        return INT32_MIN;
1771
    } else {
1772
        env->spr[SPR_MQ] = tmp % arg2;
1773
        tmp /= (int32_t)arg2;
1774
        if ((int32_t)tmp != tmp) {
1775
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
1776
        } else {
1777
            env->xer &= ~(1 << XER_OV);
1778
        }
1779
        return tmp;
1780
    }
1781
}
1782

    
1783
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1784
{
1785
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1786
        (int32_t)arg2 == 0) {
1787
        env->spr[SPR_MQ] = 0;
1788
        return INT32_MIN;
1789
    } else {
1790
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1791
        return (int32_t)arg1 / (int32_t)arg2;
1792
    }
1793
}
1794

    
1795
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1796
{
1797
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1798
        (int32_t)arg2 == 0) {
1799
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
1800
        env->spr[SPR_MQ] = 0;
1801
        return INT32_MIN;
1802
    } else {
1803
        env->xer &= ~(1 << XER_OV);
1804
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1805
        return (int32_t)arg1 / (int32_t)arg2;
1806
    }
1807
}
1808

    
1809
#if !defined (CONFIG_USER_ONLY)
1810
target_ulong helper_rac (target_ulong addr)
1811
{
1812
    mmu_ctx_t ctx;
1813
    int nb_BATs;
1814
    target_ulong ret = 0;
1815

    
1816
    /* We don't have to generate many instances of this instruction,
1817
     * as rac is supervisor only.
1818
     */
1819
    /* XXX: FIX THIS: Pretend we have no BAT */
1820
    nb_BATs = env->nb_BATs;
1821
    env->nb_BATs = 0;
1822
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1823
        ret = ctx.raddr;
1824
    env->nb_BATs = nb_BATs;
1825
    return ret;
1826
}
1827

    
1828
void helper_rfsvc (void)
1829
{
1830
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1831
}
1832
#endif
1833

    
1834
/*****************************************************************************/
1835
/* 602 specific instructions */
1836
/* mfrom is the most crazy instruction ever seen, imho ! */
1837
/* Real implementation uses a ROM table. Do the same */
1838
/* Extremely decomposed:
 *   return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
1842
#if !defined (CONFIG_USER_ONLY)
1843
target_ulong helper_602_mfrom (target_ulong arg)
1844
{
1845
    if (likely(arg < 602)) {
1846
#include "mfrom_table.c"
1847
        return mfrom_ROM_table[arg];
1848
    } else {
1849
        return 0;
1850
    }
1851
}
1852
#endif
1853

    
1854
/*****************************************************************************/
1855
/* Embedded PowerPC specific helpers */
1856

    
1857
/* XXX: to be improved to check access rights when in user-mode */
1858
target_ulong helper_load_dcr (target_ulong dcrn)
1859
{
1860
    target_ulong val = 0;
1861

    
1862
    if (unlikely(env->dcr_env == NULL)) {
1863
        if (loglevel != 0) {
1864
            fprintf(logfile, "No DCR environment\n");
1865
        }
1866
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1867
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1868
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1869
        if (loglevel != 0) {
1870
            fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1871
        }
1872
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1873
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1874
    }
1875
    return val;
1876
}
1877

    
1878
void helper_store_dcr (target_ulong dcrn, target_ulong val)
1879
{
1880
    if (unlikely(env->dcr_env == NULL)) {
1881
        if (loglevel != 0) {
1882
            fprintf(logfile, "No DCR environment\n");
1883
        }
1884
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1885
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1886
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1887
        if (loglevel != 0) {
1888
            fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
1889
        }
1890
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1891
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1892
    }
1893
}
1894

    
1895
#if !defined(CONFIG_USER_ONLY)
1896
void helper_40x_rfci (void)
1897
{
1898
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1899
           ~((target_ulong)0xFFFF0000), 0);
1900
}
1901

    
1902
void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
1919
#endif
1920

    
1921
/* 440 specific */
1922
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1923
{
1924
    target_ulong mask;
1925
    int i;
1926

    
1927
    i = 1;
1928
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1929
        if ((high & mask) == 0) {
1930
            if (update_Rc) {
1931
                env->crf[0] = 0x4;
1932
            }
1933
            goto done;
1934
        }
1935
        i++;
1936
    }
1937
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1938
        if ((low & mask) == 0) {
1939
            if (update_Rc) {
1940
                env->crf[0] = 0x8;
1941
            }
1942
            goto done;
1943
        }
1944
        i++;
1945
    }
1946
    if (update_Rc) {
1947
        env->crf[0] = 0x2;
1948
    }
1949
 done:
1950
    env->xer = (env->xer & ~0x7F) | i;
1951
    if (update_Rc) {
1952
        env->crf[0] |= xer_so;
1953
    }
1954
    return i;
1955
}
1956

    
1957
/*****************************************************************************/
1958
/* Altivec extension helpers */
1959
#if defined(WORDS_BIGENDIAN)
1960
#define HI_IDX 0
1961
#define LO_IDX 1
1962
#else
1963
#define HI_IDX 1
1964
#define LO_IDX 0
1965
#endif
1966

    
1967
#if defined(WORDS_BIGENDIAN)
1968
#define VECTOR_FOR_INORDER_I(index, element)            \
1969
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
1970
#else
1971
#define VECTOR_FOR_INORDER_I(index, element)            \
1972
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1973
#endif
1974

    
1975
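/* lvsl/lvsr build the permute control vector used by unaligned vector load
 * sequences: lvsl yields bytes sh..sh+15, lvsr yields 16-sh..31-sh, where
 * sh is the low four bits of the effective address.
 */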
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
1976
{
1977
    int i, j = (sh & 0xf);
1978

    
1979
    VECTOR_FOR_INORDER_I (i, u8) {
1980
        r->u8[i] = j++;
1981
    }
1982
}
1983

    
1984
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
1985
{
1986
    int i, j = 0x10 - (sh & 0xf);
1987

    
1988
    VECTOR_FOR_INORDER_I (i, u8) {
1989
        r->u8[i] = j++;
1990
    }
1991
}
1992

    
1993
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1994
{
1995
    int i;
1996
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1997
        r->u32[i] = ~a->u32[i] < b->u32[i];
1998
    }
1999
}
2000

    
2001
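/* Element-wise modulo add/subtract (vaddubm & co.): plain C arithmetic on
 * the element type already wraps, so no saturation handling is needed here.
 */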
#define VARITH_DO(name, op, element)        \
2002
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
2003
{                                                                       \
2004
    int i;                                                              \
2005
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
2006
        r->element[i] = a->element[i] op b->element[i];                 \
2007
    }                                                                   \
2008
}
2009
#define VARITH(suffix, element)                  \
2010
  VARITH_DO(add##suffix, +, element)             \
2011
  VARITH_DO(sub##suffix, -, element)
2012
VARITH(ubm, u8)
2013
VARITH(uhm, u16)
2014
VARITH(uwm, u32)
2015
#undef VARITH_DO
2016
#undef VARITH
2017

    
2018
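/* Vector average: compute the sum plus one in a type twice as wide as the
 * element, then shift right, so the result rounds up without overflowing.
 */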
#define VAVG_DO(name, element, etype)                                   \
2019
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2020
    {                                                                   \
2021
        int i;                                                          \
2022
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2023
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
2024
            r->element[i] = x >> 1;                                     \
2025
        }                                                               \
2026
    }
2027

    
2028
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2029
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
2030
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2031
VAVG(b, s8, int16_t, u8, uint16_t)
2032
VAVG(h, s16, int32_t, u16, uint32_t)
2033
VAVG(w, s32, int64_t, u32, uint64_t)
2034
#undef VAVG_DO
2035
#undef VAVG
2036

    
2037
#define VMINMAX_DO(name, compare, element)                              \
2038
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2039
    {                                                                   \
2040
        int i;                                                          \
2041
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2042
            if (a->element[i] compare b->element[i]) {                  \
2043
                r->element[i] = b->element[i];                          \
2044
            } else {                                                    \
2045
                r->element[i] = a->element[i];                          \
2046
            }                                                           \
2047
        }                                                               \
2048
    }
2049
#define VMINMAX(suffix, element)                \
2050
  VMINMAX_DO(min##suffix, >, element)           \
2051
  VMINMAX_DO(max##suffix, <, element)
2052
VMINMAX(sb, s8)
2053
VMINMAX(sh, s16)
2054
VMINMAX(sw, s32)
2055
VMINMAX(ub, u8)
2056
VMINMAX(uh, u16)
2057
VMINMAX(uw, u32)
2058
#undef VMINMAX_DO
2059
#undef VMINMAX
2060

    
2061
#define VMRG_DO(name, element, highp)                                   \
2062
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2063
    {                                                                   \
2064
        ppc_avr_t result;                                               \
2065
        int i;                                                          \
2066
        size_t n_elems = ARRAY_SIZE(r->element);                        \
2067
        for (i = 0; i < n_elems/2; i++) {                               \
2068
            if (highp) {                                                \
2069
                result.element[i*2+HI_IDX] = a->element[i];             \
2070
                result.element[i*2+LO_IDX] = b->element[i];             \
2071
            } else {                                                    \
2072
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2073
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2074
            }                                                           \
2075
        }                                                               \
2076
        *r = result;                                                    \
2077
    }
2078
#if defined(WORDS_BIGENDIAN)
2079
#define MRGHI 0
2080
#define MRGLO 1
2081
#else
2082
#define MRGHI 1
2083
#define MRGLO 0
2084
#endif
2085
#define VMRG(suffix, element)                   \
2086
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
2087
  VMRG_DO(mrgh##suffix, element, MRGLO)
2088
VMRG(b, u8)
2089
VMRG(h, u16)
2090
VMRG(w, u32)
2091
#undef VMRG_DO
2092
#undef VMRG
2093
#undef MRGHI
2094
#undef MRGLO
2095

    
2096
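/* Even/odd multiply: each product element is twice as wide as the source
 * elements; evenp selects the even-numbered (HI_IDX) or odd-numbered
 * (LO_IDX) input of each pair.
 */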
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
2097
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2098
    {                                                                   \
2099
        int i;                                                          \
2100
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
2101
            if (evenp) {                                                \
2102
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2103
            } else {                                                    \
2104
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2105
            }                                                           \
2106
        }                                                               \
2107
    }
2108
#define VMUL(suffix, mul_element, prod_element) \
2109
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2110
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2111
VMUL(sb, s8, s16)
2112
VMUL(sh, s16, s32)
2113
VMUL(ub, u8, u16)
2114
VMUL(uh, u16, u32)
2115
#undef VMUL_DO
2116
#undef VMUL
2117

    
2118
#define VROTATE(suffix, element)                                        \
2119
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2120
    {                                                                   \
2121
        int i;                                                          \
2122
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2123
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2124
            unsigned int shift = b->element[i] & mask;                  \
2125
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2126
        }                                                               \
2127
    }
2128
VROTATE(b, u8)
2129
VROTATE(h, u16)
2130
VROTATE(w, u32)
2131
#undef VROTATE
2132

    
2133
#define VSL(suffix, element)                                            \
2134
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2135
    {                                                                   \
2136
        int i;                                                          \
2137
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2138
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2139
            unsigned int shift = b->element[i] & mask;                  \
2140
            r->element[i] = a->element[i] << shift;                     \
2141
        }                                                               \
2142
    }
2143
VSL(b, u8)
2144
VSL(h, u16)
2145
VSL(w, u32)
2146
#undef VSL
2147

    
2148
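/* vsldoi: shift the 32-byte concatenation of a and b left by sh bytes and
 * keep the high 16 bytes; the two loops below index differently so that
 * big- and little-endian hosts produce the same result.
 */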
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2149
{
2150
    int sh = shift & 0xf;
2151
    int i;
2152
    ppc_avr_t result;
2153

    
2154
#if defined(WORDS_BIGENDIAN)
2155
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2156
        int index = sh + i;
2157
        if (index > 0xf) {
2158
            result.u8[i] = b->u8[index-0x10];
2159
        } else {
2160
            result.u8[i] = a->u8[index];
2161
        }
2162
    }
2163
#else
2164
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2165
        int index = (16 - sh) + i;
2166
        if (index > 0xf) {
2167
            result.u8[i] = a->u8[index-0x10];
2168
        } else {
2169
            result.u8[i] = b->u8[index];
2170
        }
2171
    }
2172
#endif
2173
    *r = result;
2174
}
2175

    
2176
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2177
{
2178
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2179

    
2180
#if defined (WORDS_BIGENDIAN)
2181
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2182
  memset (&r->u8[16-sh], 0, sh);
2183
#else
2184
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2185
  memset (&r->u8[0], 0, sh);
2186
#endif
2187
}
2188

    
2189
/* Experimental testing shows that hardware masks the immediate.  */
2190
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2191
#if defined(WORDS_BIGENDIAN)
2192
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2193
#else
2194
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2195
#endif
2196
#define VSPLT(suffix, element)                                          \
2197
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2198
    {                                                                   \
2199
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
2200
        int i;                                                          \
2201
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2202
            r->element[i] = s;                                          \
2203
        }                                                               \
2204
    }
2205
VSPLT(b, u8)
2206
VSPLT(h, u16)
2207
VSPLT(w, u32)
2208
#undef VSPLT
2209
#undef SPLAT_ELEMENT
2210
#undef _SPLAT_MASKED
2211

    
2212
#define VSR(suffix, element)                                            \
2213
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2214
    {                                                                   \
2215
        int i;                                                          \
2216
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2217
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2218
            unsigned int shift = b->element[i] & mask;                  \
2219
            r->element[i] = a->element[i] >> shift;                     \
2220
        }                                                               \
2221
    }
2222
VSR(ab, s8)
2223
VSR(ah, s16)
2224
VSR(aw, s32)
2225
VSR(b, u8)
2226
VSR(h, u16)
2227
VSR(w, u32)
2228
#undef VSR
2229

    
2230
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2231
{
2232
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2233

    
2234
#if defined (WORDS_BIGENDIAN)
2235
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2236
  memset (&r->u8[0], 0, sh);
2237
#else
2238
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2239
  memset (&r->u8[16-sh], 0, sh);
2240
#endif
2241
}
2242

    
2243
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2244
{
2245
    int i;
2246
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2247
        r->u32[i] = a->u32[i] >= b->u32[i];
2248
    }
2249
}
2250

    
2251
#undef VECTOR_FOR_INORDER_I
2252
#undef HI_IDX
2253
#undef LO_IDX
2254

    
2255
/*****************************************************************************/
2256
/* SPE extension helpers */
2257
/* Use a table to make this quicker */
2258
static uint8_t hbrev[16] = {
2259
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
2260
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
2261
};
2262

    
2263
static always_inline uint8_t byte_reverse (uint8_t val)
2264
{
2265
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
2266
}
2267

    
2268
static always_inline uint32_t word_reverse (uint32_t val)
2269
{
2270
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2271
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
2272
}
2273

    
2274
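/* brinc: bit-reversed increment used by SPE bit-reversed addressing
 * (typically for FFT buffers).  The low bits selected by the mask in arg2
 * are bit-reversed, incremented, reversed back and merged into arg1;
 * MASKBITS bounds how many bits can participate.
 */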
#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
2275
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2276
{
2277
    uint32_t a, b, d, mask;
2278

    
2279
    mask = UINT32_MAX >> (32 - MASKBITS);
2280
    a = arg1 & mask;
2281
    b = arg2 & mask;
2282
    d = word_reverse(1 + word_reverse(a | ~b));
2283
    return (arg1 & ~mask) | (d & b);
2284
}
2285

    
2286
uint32_t helper_cntlsw32 (uint32_t val)
2287
{
2288
    if (val & 0x80000000)
2289
        return clz32(~val);
2290
    else
2291
        return clz32(val);
2292
}
2293

    
2294
uint32_t helper_cntlzw32 (uint32_t val)
2295
{
2296
    return clz32(val);
2297
}
2298

    
2299
/* Single-precision floating-point conversions */
2300
static always_inline uint32_t efscfsi (uint32_t val)
2301
{
2302
    CPU_FloatU u;
2303

    
2304
    u.f = int32_to_float32(val, &env->spe_status);
2305

    
2306
    return u.l;
2307
}
2308

    
2309
static always_inline uint32_t efscfui (uint32_t val)
2310
{
2311
    CPU_FloatU u;
2312

    
2313
    u.f = uint32_to_float32(val, &env->spe_status);
2314

    
2315
    return u.l;
2316
}
2317

    
2318
static always_inline int32_t efsctsi (uint32_t val)
2319
{
2320
    CPU_FloatU u;
2321

    
2322
    u.l = val;
2323
    /* NaN are not treated the same way IEEE 754 does */
2324
    if (unlikely(float32_is_nan(u.f)))
2325
        return 0;
2326

    
2327
    return float32_to_int32(u.f, &env->spe_status);
2328
}
2329

    
2330
static always_inline uint32_t efsctui (uint32_t val)
2331
{
2332
    CPU_FloatU u;
2333

    
2334
    u.l = val;
2335
    /* NaN are not treated the same way IEEE 754 does */
2336
    if (unlikely(float32_is_nan(u.f)))
2337
        return 0;
2338

    
2339
    return float32_to_uint32(u.f, &env->spe_status);
2340
}
2341

    
2342
static always_inline uint32_t efsctsiz (uint32_t val)
2343
{
2344
    CPU_FloatU u;
2345

    
2346
    u.l = val;
2347
    /* NaN are not treated the same way IEEE 754 does */
2348
    if (unlikely(float32_is_nan(u.f)))
2349
        return 0;
2350

    
2351
    return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2352
}
2353

    
2354
static always_inline uint32_t efsctuiz (uint32_t val)
2355
{
2356
    CPU_FloatU u;
2357

    
2358
    u.l = val;
2359
    /* NaN are not treated the same way IEEE 754 does */
2360
    if (unlikely(float32_is_nan(u.f)))
2361
        return 0;
2362

    
2363
    return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2364
}
2365

    
2366
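/* The "fractional" conversions below treat the 32-bit operand as a fixed
 * point value with 32 fraction bits, hence the scaling by 2^32 around the
 * integer conversion.
 */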
static always_inline uint32_t efscfsf (uint32_t val)
2367
{
2368
    CPU_FloatU u;
2369
    float32 tmp;
2370

    
2371
    u.f = int32_to_float32(val, &env->spe_status);
2372
    tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2373
    u.f = float32_div(u.f, tmp, &env->spe_status);
2374

    
2375
    return u.l;
2376
}
2377

    
2378
static always_inline uint32_t efscfuf (uint32_t val)
2379
{
2380
    CPU_FloatU u;
2381
    float32 tmp;
2382

    
2383
    u.f = uint32_to_float32(val, &env->spe_status);
2384
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2385
    u.f = float32_div(u.f, tmp, &env->spe_status);
2386

    
2387
    return u.l;
2388
}
2389

    
2390
static always_inline uint32_t efsctsf (uint32_t val)
2391
{
2392
    CPU_FloatU u;
2393
    float32 tmp;
2394

    
2395
    u.l = val;
2396
    /* NaN are not treated the same way IEEE 754 does */
2397
    if (unlikely(float32_is_nan(u.f)))
2398
        return 0;
2399
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2400
    u.f = float32_mul(u.f, tmp, &env->spe_status);
2401

    
2402
    return float32_to_int32(u.f, &env->spe_status);
2403
}
2404

    
2405
static always_inline uint32_t efsctuf (uint32_t val)
2406
{
2407
    CPU_FloatU u;
2408
    float32 tmp;
2409

    
2410
    u.l = val;
2411
    /* NaN are not treated the same way IEEE 754 does */
2412
    if (unlikely(float32_is_nan(u.f)))
2413
        return 0;
2414
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2415
    u.f = float32_mul(u.f, tmp, &env->spe_status);
2416

    
2417
    return float32_to_uint32(u.f, &env->spe_status);
2418
}
2419

    
2420
#define HELPER_SPE_SINGLE_CONV(name)                                          \
2421
uint32_t helper_e##name (uint32_t val)                                        \
2422
{                                                                             \
2423
    return e##name(val);                                                      \
2424
}
2425
/* efscfsi */
2426
HELPER_SPE_SINGLE_CONV(fscfsi);
2427
/* efscfui */
2428
HELPER_SPE_SINGLE_CONV(fscfui);
2429
/* efscfuf */
2430
HELPER_SPE_SINGLE_CONV(fscfuf);
2431
/* efscfsf */
2432
HELPER_SPE_SINGLE_CONV(fscfsf);
2433
/* efsctsi */
2434
HELPER_SPE_SINGLE_CONV(fsctsi);
2435
/* efsctui */
2436
HELPER_SPE_SINGLE_CONV(fsctui);
2437
/* efsctsiz */
2438
HELPER_SPE_SINGLE_CONV(fsctsiz);
2439
/* efsctuiz */
2440
HELPER_SPE_SINGLE_CONV(fsctuiz);
2441
/* efsctsf */
2442
HELPER_SPE_SINGLE_CONV(fsctsf);
2443
/* efsctuf */
2444
HELPER_SPE_SINGLE_CONV(fsctuf);
2445

    
2446
#define HELPER_SPE_VECTOR_CONV(name)                                          \
2447
uint64_t helper_ev##name (uint64_t val)                                       \
2448
{                                                                             \
2449
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
2450
            (uint64_t)e##name(val);                                           \
2451
}
2452
/* evfscfsi */
2453
HELPER_SPE_VECTOR_CONV(fscfsi);
2454
/* evfscfui */
2455
HELPER_SPE_VECTOR_CONV(fscfui);
2456
/* evfscfuf */
2457
HELPER_SPE_VECTOR_CONV(fscfuf);
2458
/* evfscfsf */
2459
HELPER_SPE_VECTOR_CONV(fscfsf);
2460
/* evfsctsi */
2461
HELPER_SPE_VECTOR_CONV(fsctsi);
2462
/* evfsctui */
2463
HELPER_SPE_VECTOR_CONV(fsctui);
2464
/* evfsctsiz */
2465
HELPER_SPE_VECTOR_CONV(fsctsiz);
2466
/* evfsctuiz */
2467
HELPER_SPE_VECTOR_CONV(fsctuiz);
2468
/* evfsctsf */
2469
HELPER_SPE_VECTOR_CONV(fsctsf);
2470
/* evfsctuf */
2471
HELPER_SPE_VECTOR_CONV(fsctuf);
2472

    
2473
/* Single-precision floating-point arithmetic */
2474
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
2475
{
2476
    CPU_FloatU u1, u2;
2477
    u1.l = op1;
2478
    u2.l = op2;
2479
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
2480
    return u1.l;
2481
}
2482

    
2483
static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
2484
{
2485
    CPU_FloatU u1, u2;
2486
    u1.l = op1;
2487
    u2.l = op2;
2488
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
2489
    return u1.l;
2490
}
2491

    
2492
static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
2493
{
2494
    CPU_FloatU u1, u2;
2495
    u1.l = op1;
2496
    u2.l = op2;
2497
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
2498
    return u1.l;
2499
}
2500

    
2501
static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
2502
{
2503
    CPU_FloatU u1, u2;
2504
    u1.l = op1;
2505
    u2.l = op2;
2506
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
2507
    return u1.l;
2508
}
2509

    
2510
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
2511
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
2512
{                                                                             \
2513
    return e##name(op1, op2);                                                 \
2514
}
2515
/* efsadd */
2516
HELPER_SPE_SINGLE_ARITH(fsadd);
2517
/* efssub */
2518
HELPER_SPE_SINGLE_ARITH(fssub);
2519
/* efsmul */
2520
HELPER_SPE_SINGLE_ARITH(fsmul);
2521
/* efsdiv */
2522
HELPER_SPE_SINGLE_ARITH(fsdiv);
2523

    
2524
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
2525
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
2526
{                                                                             \
2527
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
2528
            (uint64_t)e##name(op1, op2);                                      \
2529
}
2530
/* evfsadd */
2531
HELPER_SPE_VECTOR_ARITH(fsadd);
2532
/* evfssub */
2533
HELPER_SPE_VECTOR_ARITH(fssub);
2534
/* evfsmul */
2535
HELPER_SPE_VECTOR_ARITH(fsmul);
2536
/* evfsdiv */
2537
HELPER_SPE_VECTOR_ARITH(fsdiv);
2538

    
2539
/* Single-precision floating-point comparisons */
2540
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2541
{
2542
    CPU_FloatU u1, u2;
2543
    u1.l = op1;
2544
    u2.l = op2;
2545
    return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2546
}
2547

    
2548
static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2549
{
2550
    CPU_FloatU u1, u2;
2551
    u1.l = op1;
2552
    u2.l = op2;
2553
    return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2554
}
2555

    
2556
static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2557
{
2558
    CPU_FloatU u1, u2;
2559
    u1.l = op1;
2560
    u2.l = op2;
2561
    return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2562
}
2563

    
2564
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
2565
{
2566
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2567
    return efststlt(op1, op2);
2568
}
2569

    
2570
static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
2571
{
2572
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2573
    return efststgt(op1, op2);
2574
}
2575

    
2576
static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
2577
{
2578
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2579
    return efststeq(op1, op2);
2580
}
2581

    
2582
#define HELPER_SINGLE_SPE_CMP(name)                                           \
2583
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
2584
{                                                                             \
2585
    return e##name(op1, op2) << 2;                                            \
2586
}
2587
/* efststlt */
2588
HELPER_SINGLE_SPE_CMP(fststlt);
2589
/* efststgt */
2590
HELPER_SINGLE_SPE_CMP(fststgt);
2591
/* efststeq */
2592
HELPER_SINGLE_SPE_CMP(fststeq);
2593
/* efscmplt */
2594
HELPER_SINGLE_SPE_CMP(fscmplt);
2595
/* efscmpgt */
2596
HELPER_SINGLE_SPE_CMP(fscmpgt);
2597
/* efscmpeq */
2598
HELPER_SINGLE_SPE_CMP(fscmpeq);
2599

    
2600
static always_inline uint32_t evcmp_merge (int t0, int t1)
2601
{
2602
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
2603
}
2604

    
2605
#define HELPER_VECTOR_SPE_CMP(name)                                           \
2606
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
2607
{                                                                             \
2608
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
2609
}
2610
/* evfststlt */
2611
HELPER_VECTOR_SPE_CMP(fststlt);
2612
/* evfststgt */
2613
HELPER_VECTOR_SPE_CMP(fststgt);
2614
/* evfststeq */
2615
HELPER_VECTOR_SPE_CMP(fststeq);
2616
/* evfscmplt */
2617
HELPER_VECTOR_SPE_CMP(fscmplt);
2618
/* evfscmpgt */
2619
HELPER_VECTOR_SPE_CMP(fscmpgt);
2620
/* evfscmpeq */
2621
HELPER_VECTOR_SPE_CMP(fscmpeq);
2622

    
2623
/* Double-precision floating-point conversion */
2624
uint64_t helper_efdcfsi (uint32_t val)
2625
{
2626
    CPU_DoubleU u;
2627

    
2628
    u.d = int32_to_float64(val, &env->spe_status);
2629

    
2630
    return u.ll;
2631
}
2632

    
2633
uint64_t helper_efdcfsid (uint64_t val)
2634
{
2635
    CPU_DoubleU u;
2636

    
2637
    u.d = int64_to_float64(val, &env->spe_status);
2638

    
2639
    return u.ll;
2640
}
2641

    
2642
uint64_t helper_efdcfui (uint32_t val)
2643
{
2644
    CPU_DoubleU u;
2645

    
2646
    u.d = uint32_to_float64(val, &env->spe_status);
2647

    
2648
    return u.ll;
2649
}
2650

    
2651
uint64_t helper_efdcfuid (uint64_t val)
2652
{
2653
    CPU_DoubleU u;
2654

    
2655
    u.d = uint64_to_float64(val, &env->spe_status);
2656

    
2657
    return u.ll;
2658
}
2659

    
2660
uint32_t helper_efdctsi (uint64_t val)
2661
{
2662
    CPU_DoubleU u;
2663

    
2664
    u.ll = val;
2665
    /* NaN are not treated the same way IEEE 754 does */
2666
    if (unlikely(float64_is_nan(u.d)))
2667
        return 0;
2668

    
2669
    return float64_to_int32(u.d, &env->spe_status);
2670
}
2671

    
2672
uint32_t helper_efdctui (uint64_t val)
2673
{
2674
    CPU_DoubleU u;
2675

    
2676
    u.ll = val;
2677
    /* NaN are not treated the same way IEEE 754 does */
2678
    if (unlikely(float64_is_nan(u.d)))
2679
        return 0;
2680

    
2681
    return float64_to_uint32(u.d, &env->spe_status);
2682
}
2683

    
2684
uint32_t helper_efdctsiz (uint64_t val)
2685
{
2686
    CPU_DoubleU u;
2687

    
2688
    u.ll = val;
2689
    /* NaN are not treated the same way IEEE 754 does */
2690
    if (unlikely(float64_is_nan(u.d)))
2691
        return 0;
2692

    
2693
    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2694
}
2695

    
2696
uint64_t helper_efdctsidz (uint64_t val)
2697
{
2698
    CPU_DoubleU u;
2699

    
2700
    u.ll = val;
2701
    /* NaN are not treated the same way IEEE 754 does */
2702
    if (unlikely(float64_is_nan(u.d)))
2703
        return 0;
2704

    
2705
    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2706
}
2707

    
2708
uint32_t helper_efdctuiz (uint64_t val)
2709
{
2710
    CPU_DoubleU u;
2711

    
2712
    u.ll = val;
2713
    /* NaN are not treated the same way IEEE 754 does */
2714
    if (unlikely(float64_is_nan(u.d)))
2715
        return 0;
2716

    
2717
    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2718
}
2719

    
2720
uint64_t helper_efdctuidz (uint64_t val)
2721
{
2722
    CPU_DoubleU u;
2723

    
2724
    u.ll = val;
2725
    /* NaN are not treated the same way IEEE 754 does */
2726
    if (unlikely(float64_is_nan(u.d)))
2727
        return 0;
2728

    
2729
    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2730
}
2731

    
2732
uint64_t helper_efdcfsf (uint32_t val)
2733
{
2734
    CPU_DoubleU u;
2735
    float64 tmp;
2736

    
2737
    u.d = int32_to_float64(val, &env->spe_status);
2738
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2739
    u.d = float64_div(u.d, tmp, &env->spe_status);
2740

    
2741
    return u.ll;
2742
}
2743

    
2744
uint64_t helper_efdcfuf (uint32_t val)
2745
{
2746
    CPU_DoubleU u;
2747
    float64 tmp;
2748

    
2749
    u.d = uint32_to_float64(val, &env->spe_status);
2750
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2751
    u.d = float64_div(u.d, tmp, &env->spe_status);
2752

    
2753
    return u.ll;
2754
}
2755

    
2756
uint32_t helper_efdctsf (uint64_t val)
2757
{
2758
    CPU_DoubleU u;
2759
    float64 tmp;
2760

    
2761
    u.ll = val;
2762
    /* NaN are not treated the same way IEEE 754 does */
2763
    if (unlikely(float64_is_nan(u.d)))
2764
        return 0;
2765
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2766
    u.d = float64_mul(u.d, tmp, &env->spe_status);
2767

    
2768
    return float64_to_int32(u.d, &env->spe_status);
2769
}
2770

    
2771
uint32_t helper_efdctuf (uint64_t val)
2772
{
2773
    CPU_DoubleU u;
2774
    float64 tmp;
2775

    
2776
    u.ll = val;
2777
    /* NaN are not treated the same way IEEE 754 does */
2778
    if (unlikely(float64_is_nan(u.d)))
2779
        return 0;
2780
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2781
    u.d = float64_mul(u.d, tmp, &env->spe_status);
2782

    
2783
    return float64_to_uint32(u.d, &env->spe_status);
2784
}
2785

    
2786
uint32_t helper_efscfd (uint64_t val)
2787
{
2788
    CPU_DoubleU u1;
2789
    CPU_FloatU u2;
2790

    
2791
    u1.ll = val;
2792
    u2.f = float64_to_float32(u1.d, &env->spe_status);
2793

    
2794
    return u2.l;
2795
}
2796

    
2797
uint64_t helper_efdcfs (uint32_t val)
2798
{
2799
    CPU_DoubleU u2;
2800
    CPU_FloatU u1;
2801

    
2802
    u1.l = val;
2803
    u2.d = float32_to_float64(u1.f, &env->spe_status);
2804

    
2805
    return u2.ll;
2806
}
2807

    
2808
/* Double precision fixed-point arithmetic */
2809
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
2810
{
2811
    CPU_DoubleU u1, u2;
2812
    u1.ll = op1;
2813
    u2.ll = op2;
2814
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
2815
    return u1.ll;
2816
}
2817

    
2818
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
2819
{
2820
    CPU_DoubleU u1, u2;
2821
    u1.ll = op1;
2822
    u2.ll = op2;
2823
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
2824
    return u1.ll;
2825
}
2826

    
2827
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
2828
{
2829
    CPU_DoubleU u1, u2;
2830
    u1.ll = op1;
2831
    u2.ll = op2;
2832
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
2833
    return u1.ll;
2834
}
2835

    
2836
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
2837
{
2838
    CPU_DoubleU u1, u2;
2839
    u1.ll = op1;
2840
    u2.ll = op2;
2841
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
2842
    return u1.ll;
2843
}
2844

    
2845
/* Double precision floating point helpers */
2846
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
2847
{
2848
    CPU_DoubleU u1, u2;
2849
    u1.ll = op1;
2850
    u2.ll = op2;
2851
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2852
}
2853

    
2854
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
2855
{
2856
    CPU_DoubleU u1, u2;
2857
    u1.ll = op1;
2858
    u2.ll = op2;
2859
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
2860
}
2861

    
2862
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
2863
{
2864
    CPU_DoubleU u1, u2;
2865
    u1.ll = op1;
2866
    u2.ll = op2;
2867
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2868
}
2869

    
2870
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
2871
{
2872
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2873
    return helper_efdtstlt(op1, op2);
2874
}
2875

    
2876
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
2877
{
2878
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2879
    return helper_efdtstgt(op1, op2);
2880
}
2881

    
2882
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
2883
{
2884
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2885
    return helper_efdtsteq(op1, op2);
2886
}
2887

    
2888
/*****************************************************************************/
2889
/* Softmmu support */
2890
#if !defined (CONFIG_USER_ONLY)
2891

    
2892
#define MMUSUFFIX _mmu
2893

    
2894
#define SHIFT 0
2895
#include "softmmu_template.h"
2896

    
2897
#define SHIFT 1
2898
#include "softmmu_template.h"
2899

    
2900
#define SHIFT 2
2901
#include "softmmu_template.h"
2902

    
2903
#define SHIFT 3
2904
#include "softmmu_template.h"
2905

    
2906
/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c) */
2909
/* XXX: fix it to restore all registers */
2910
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2911
{
2912
    TranslationBlock *tb;
2913
    CPUState *saved_env;
2914
    unsigned long pc;
2915
    int ret;
2916

    
2917
    /* XXX: hack to restore env in all cases, even if not called from
2918
       generated code */
2919
    saved_env = env;
2920
    env = cpu_single_env;
2921
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2922
    if (unlikely(ret != 0)) {
2923
        if (likely(retaddr)) {
2924
            /* now we have a real cpu fault */
2925
            pc = (unsigned long)retaddr;
2926
            tb = tb_find_pc(pc);
2927
            if (likely(tb)) {
2928
                /* the PC is inside the translated code. It means that we have
2929
                   a virtual CPU fault */
2930
                cpu_restore_state(tb, env, pc, NULL);
2931
            }
2932
        }
2933
        helper_raise_exception_err(env->exception_index, env->error_code);
2934
    }
2935
    env = saved_env;
2936
}
2937

    
2938
/* Segment registers load and store */
target_ulong helper_load_sr (target_ulong sr_num)
{
    return env->sr[sr_num];
}

void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
2948

    
2949
/* SLB management */
2950
#if defined(TARGET_PPC64)
2951
target_ulong helper_load_slb (target_ulong slb_nr)
2952
{
2953
    return ppc_load_slb(env, slb_nr);
2954
}
2955

    
2956
void helper_store_slb (target_ulong slb_nr, target_ulong rs)
2957
{
2958
    ppc_store_slb(env, slb_nr, rs);
2959
}
2960

    
2961
void helper_slbia (void)
2962
{
2963
    ppc_slb_invalidate_all(env);
2964
}
2965

    
2966
void helper_slbie (target_ulong addr)
2967
{
2968
    ppc_slb_invalidate_one(env, addr);
2969
}
2970

    
2971
#endif /* defined(TARGET_PPC64) */
2972

    
2973
/* TLB management */
2974
void helper_tlbia (void)
2975
{
2976
    ppc_tlb_invalidate_all(env);
2977
}
2978

    
2979
void helper_tlbie (target_ulong addr)
2980
{
2981
    ppc_tlb_invalidate_one(env, addr);
2982
}
2983

    
2984
/* Software driven TLBs management */
2985
/* PowerPC 602/603 software TLB load instructions helpers */
2986
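/* Refill one TLB entry from the RPA and ICMP/IMISS (or DCMP/DMISS)
 * registers; bit 17 of SRR1 selects which way of the set is written.
 */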
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
2987
{
2988
    target_ulong RPN, CMP, EPN;
2989
    int way;
2990

    
2991
    RPN = env->spr[SPR_RPA];
2992
    if (is_code) {
2993
        CMP = env->spr[SPR_ICMP];
2994
        EPN = env->spr[SPR_IMISS];
2995
    } else {
2996
        CMP = env->spr[SPR_DCMP];
2997
        EPN = env->spr[SPR_DMISS];
2998
    }
2999
    way = (env->spr[SPR_SRR1] >> 17) & 1;
3000
#if defined (DEBUG_SOFTWARE_TLB)
3001
    if (loglevel != 0) {
3002
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3003
                " PTE1 " ADDRX " way %d\n",
3004
                __func__, new_EPN, EPN, CMP, RPN, way);
3005
    }
3006
#endif
3007
    /* Store this TLB */
3008
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3009
                     way, is_code, CMP, RPN);
3010
}
3011

    
3012
void helper_6xx_tlbd (target_ulong EPN)
3013
{
3014
    do_6xx_tlb(EPN, 0);
3015
}
3016

    
3017
void helper_6xx_tlbi (target_ulong EPN)
3018
{
3019
    do_6xx_tlb(EPN, 1);
3020
}
3021

    
3022
/* PowerPC 74xx software TLB load instructions helpers */
3023
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3024
{
3025
    target_ulong RPN, CMP, EPN;
3026
    int way;
3027

    
3028
    RPN = env->spr[SPR_PTELO];
3029
    CMP = env->spr[SPR_PTEHI];
3030
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
3031
    way = env->spr[SPR_TLBMISS] & 0x3;
3032
#if defined (DEBUG_SOFTWARE_TLB)
3033
    if (loglevel != 0) {
3034
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3035
                " PTE1 " ADDRX " way %d\n",
3036
                __func__, new_EPN, EPN, CMP, RPN, way);
3037
    }
3038
#endif
3039
    /* Store this TLB */
3040
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3041
                     way, is_code, CMP, RPN);
3042
}
3043

    
3044
void helper_74xx_tlbd (target_ulong EPN)
3045
{
3046
    do_74xx_tlb(EPN, 0);
3047
}
3048

    
3049
void helper_74xx_tlbi (target_ulong EPN)
3050
{
3051
    do_74xx_tlb(EPN, 1);
3052
}
3053

    
3054
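/* BookE TLB entries encode the page size as 1 KB * 4^SIZE; the helper
 * below expands that field, and booke_page_size_to_tlb is its inverse,
 * returning -1 for sizes that have no encoding.
 */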
static always_inline target_ulong booke_tlb_to_page_size (int size)
3055
{
3056
    return 1024 << (2 * size);
3057
}
3058

    
3059
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3060
{
3061
    int size;
3062

    
3063
    switch (page_size) {
3064
    case 0x00000400UL:
3065
        size = 0x0;
3066
        break;
3067
    case 0x00001000UL:
3068
        size = 0x1;
3069
        break;
3070
    case 0x00004000UL:
3071
        size = 0x2;
3072
        break;
3073
    case 0x00010000UL:
3074
        size = 0x3;
3075
        break;
3076
    case 0x00040000UL:
3077
        size = 0x4;
3078
        break;
3079
    case 0x00100000UL:
3080
        size = 0x5;
3081
        break;
3082
    case 0x00400000UL:
3083
        size = 0x6;
3084
        break;
3085
    case 0x01000000UL:
3086
        size = 0x7;
3087
        break;
3088
    case 0x04000000UL:
3089
        size = 0x8;
3090
        break;
3091
    case 0x10000000UL:
3092
        size = 0x9;
3093
        break;
3094
    case 0x40000000UL:
3095
        size = 0xA;
3096
        break;
3097
#if defined (TARGET_PPC64)
3098
    case 0x000100000000ULL:
3099
        size = 0xB;
3100
        break;
3101
    case 0x000400000000ULL:
3102
        size = 0xC;
3103
        break;
3104
    case 0x001000000000ULL:
3105
        size = 0xD;
3106
        break;
3107
    case 0x004000000000ULL:
3108
        size = 0xE;
3109
        break;
3110
    case 0x010000000000ULL:
3111
        size = 0xF;
3112
        break;
3113
#endif
3114
    default:
3115
        size = -1;
3116
        break;
3117
    }
3118

    
3119
    return size;
3120
}
3121

    
3122
/* Helpers for 4xx TLB management */
3123
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3124
{
3125
    ppcemb_tlb_t *tlb;
3126
    target_ulong ret;
3127
    int size;
3128

    
3129
    entry &= 0x3F;
3130
    tlb = &env->tlb[entry].tlbe;
3131
    ret = tlb->EPN;
3132
    if (tlb->prot & PAGE_VALID)
3133
        ret |= 0x400;
3134
    size = booke_page_size_to_tlb(tlb->size);
3135
    if (size < 0 || size > 0x7)
3136
        size = 1;
3137
    ret |= size << 7;
3138
    env->spr[SPR_40x_PID] = tlb->PID;
3139
    return ret;
3140
}
3141

    
3142
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3143
{
3144
    ppcemb_tlb_t *tlb;
3145
    target_ulong ret;
3146

    
3147
    entry &= 0x3F;
3148
    tlb = &env->tlb[entry].tlbe;
3149
    ret = tlb->RPN;
3150
    if (tlb->prot & PAGE_EXEC)
3151
        ret |= 0x200;
3152
    if (tlb->prot & PAGE_WRITE)
3153
        ret |= 0x100;
3154
    return ret;
3155
}
3156

    
3157
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3158
{
3159
    ppcemb_tlb_t *tlb;
3160
    target_ulong page, end;
3161

    
3162
#if defined (DEBUG_SOFTWARE_TLB)
3163
    if (loglevel != 0) {
3164
        fprintf(logfile, "%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3165
    }
3166
#endif
3167
    entry &= 0x3F;
3168
    tlb = &env->tlb[entry].tlbe;
3169
    /* Invalidate previous TLB (if it's valid) */
3170
    if (tlb->prot & PAGE_VALID) {
3171
        end = tlb->EPN + tlb->size;
3172
#if defined (DEBUG_SOFTWARE_TLB)
3173
        if (loglevel != 0) {
3174
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
3175
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3176
        }
3177
#endif
3178
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3179
            tlb_flush_page(env, page);
3180
    }
3181
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3182
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3183
     * If this ever occurs, one should use the ppcemb target instead
3184
     * of the ppc or ppc64 one
3185
     */
3186
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3187
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3188
                  "are not supported (%d)\n",
3189
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3190
    }
3191
    tlb->EPN = val & ~(tlb->size - 1);
3192
    if (val & 0x40)
3193
        tlb->prot |= PAGE_VALID;
3194
    else
3195
        tlb->prot &= ~PAGE_VALID;
3196
    if (val & 0x20) {
3197
        /* XXX: TO BE FIXED */
3198
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
3199
    }
3200
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3201
    tlb->attr = val & 0xFF;
3202
#if defined (DEBUG_SOFTWARE_TLB)
3203
    if (loglevel != 0) {
3204
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3205
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3206
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3207
                tlb->prot & PAGE_READ ? 'r' : '-',
3208
                tlb->prot & PAGE_WRITE ? 'w' : '-',
3209
                tlb->prot & PAGE_EXEC ? 'x' : '-',
3210
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3211
    }
3212
#endif
3213
    /* Invalidate new TLB (if valid) */
3214
    if (tlb->prot & PAGE_VALID) {
3215
        end = tlb->EPN + tlb->size;
3216
#if defined (DEBUG_SOFTWARE_TLB)
3217
        if (loglevel != 0) {
3218
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
3219
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3220
        }
3221
#endif
3222
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3223
            tlb_flush_page(env, page);
3224
    }
3225
}
3226

    
3227
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
3228
{
3229
    ppcemb_tlb_t *tlb;
3230

    
3231
#if defined (DEBUG_SOFTWARE_TLB)
3232
    if (loglevel != 0) {
3233
        fprintf(logfile, "%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
3234
    }
3235
#endif
3236
    entry &= 0x3F;
3237
    tlb = &env->tlb[entry].tlbe;
3238
    tlb->RPN = val & 0xFFFFFC00;
3239
    tlb->prot = PAGE_READ;
3240
    if (val & 0x200)
3241
        tlb->prot |= PAGE_EXEC;
3242
    if (val & 0x100)
3243
        tlb->prot |= PAGE_WRITE;
3244
#if defined (DEBUG_SOFTWARE_TLB)
3245
    if (loglevel != 0) {
3246
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3247
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3248
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3249
                tlb->prot & PAGE_READ ? 'r' : '-',
3250
                tlb->prot & PAGE_WRITE ? 'w' : '-',
3251
                tlb->prot & PAGE_EXEC ? 'x' : '-',
3252
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3253
    }
3254
#endif
3255
}
3256

    
3257
target_ulong helper_4xx_tlbsx (target_ulong address)
3258
{
3259
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
3260
}
3261

    
3262
/* PowerPC 440 TLB management */
3263
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
3264
{
3265
    ppcemb_tlb_t *tlb;
3266
    target_ulong EPN, RPN, size;
3267
    int do_flush_tlbs;
3268

    
3269
#if defined (DEBUG_SOFTWARE_TLB)
3270
    if (loglevel != 0) {
3271
        fprintf(logfile, "%s word %d entry %d value " ADDRX "\n",
3272
                __func__, word, (int)entry, value);
3273
    }
3274
#endif
3275
    do_flush_tlbs = 0;
3276
    entry &= 0x3F;
3277
    tlb = &env->tlb[entry].tlbe;
3278
    switch (word) {
3279
    default:
3280
        /* Just here to please gcc */
3281
    case 0:
3282
        EPN = value & 0xFFFFFC00;
3283
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
3284
            do_flush_tlbs = 1;
3285
        tlb->EPN = EPN;
3286
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
3287
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
3288
            do_flush_tlbs = 1;
3289
        tlb->size = size;
3290
        tlb->attr &= ~0x1;
3291
        tlb->attr |= (value >> 8) & 1;
3292
        if (value & 0x200) {
3293
            tlb->prot |= PAGE_VALID;
3294
        } else {
3295
            if (tlb->prot & PAGE_VALID) {
3296
                tlb->prot &= ~PAGE_VALID;
3297
                do_flush_tlbs = 1;
3298
            }
3299
        }
3300
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
3301
        if (do_flush_tlbs)
3302
            tlb_flush(env, 1);
3303
        break;
3304
    case 1:
3305
        RPN = value & 0xFFFFFC0F;
3306
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
3307
            tlb_flush(env, 1);
3308
        tlb->RPN = RPN;
3309
        break;
3310
    case 2:
3311
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
3312
        tlb->prot = tlb->prot & PAGE_VALID;
3313
        if (value & 0x1)
3314
            tlb->prot |= PAGE_READ << 4;
3315
        if (value & 0x2)
3316
            tlb->prot |= PAGE_WRITE << 4;
3317
        if (value & 0x4)
3318
            tlb->prot |= PAGE_EXEC << 4;
3319
        if (value & 0x8)
3320
            tlb->prot |= PAGE_READ;
3321
        if (value & 0x10)
3322
            tlb->prot |= PAGE_WRITE;
3323
        if (value & 0x20)
3324
            tlb->prot |= PAGE_EXEC;
3325
        break;
3326
    }
3327
}
3328

    
3329
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
3330
{
3331
    ppcemb_tlb_t *tlb;
3332
    target_ulong ret;
3333
    int size;
3334

    
3335
    entry &= 0x3F;
3336
    tlb = &env->tlb[entry].tlbe;
3337
    switch (word) {
3338
    default:
3339
        /* Just here to please gcc */
3340
    case 0:
3341
        ret = tlb->EPN;
3342
        size = booke_page_size_to_tlb(tlb->size);
3343
        if (size < 0 || size > 0xF)
3344
            size = 1;
3345
        ret |= size << 4;
3346
        if (tlb->attr & 0x1)
3347
            ret |= 0x100;
3348
        if (tlb->prot & PAGE_VALID)
3349
            ret |= 0x200;
3350
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
3351
        env->spr[SPR_440_MMUCR] |= tlb->PID;
3352
        break;
3353
    case 1:
3354
        ret = tlb->RPN;
3355
        break;
3356
    case 2:
3357
        ret = tlb->attr & ~0x1;
3358
        if (tlb->prot & (PAGE_READ << 4))
3359
            ret |= 0x1;
3360
        if (tlb->prot & (PAGE_WRITE << 4))
3361
            ret |= 0x2;
3362
        if (tlb->prot & (PAGE_EXEC << 4))
3363
            ret |= 0x4;
3364
        if (tlb->prot & PAGE_READ)
3365
            ret |= 0x8;
3366
        if (tlb->prot & PAGE_WRITE)
3367
            ret |= 0x10;
3368
        if (tlb->prot & PAGE_EXEC)
3369
            ret |= 0x20;
3370
        break;
3371
    }
3372
    return ret;
3373
}
3374

    
3375
target_ulong helper_440_tlbsx (target_ulong address)
3376
{
3377
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
3378
}
3379

    
3380
#endif /* !CONFIG_USER_ONLY */