root / target-ppc / op_helper.c @ b04ae981
1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include <string.h>
21
#include "exec.h"
22
#include "host-utils.h"
23
#include "helper.h"
24

    
25
#include "helper_regs.h"
26

    
27
//#define DEBUG_OP
28
//#define DEBUG_EXCEPTIONS
29
//#define DEBUG_SOFTWARE_TLB
30

    
31
/*****************************************************************************/
32
/* Exceptions processing helpers */
33

    
34
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
35
{
36
#if 0
37
    printf("Raise exception %3x code : %d\n", exception, error_code);
38
#endif
39
    env->exception_index = exception;
40
    env->error_code = error_code;
41
    cpu_loop_exit();
42
}
43

    
44
void helper_raise_exception (uint32_t exception)
45
{
46
    helper_raise_exception_err(exception, 0);
47
}
48

    
49
/*****************************************************************************/
50
/* Registers load and stores */
51
target_ulong helper_load_cr (void)
52
{
53
    return (env->crf[0] << 28) |
54
           (env->crf[1] << 24) |
55
           (env->crf[2] << 20) |
56
           (env->crf[3] << 16) |
57
           (env->crf[4] << 12) |
58
           (env->crf[5] << 8) |
59
           (env->crf[6] << 4) |
60
           (env->crf[7] << 0);
61
}
62

    
63
void helper_store_cr (target_ulong val, uint32_t mask)
64
{
65
    int i, sh;
66

    
67
    for (i = 0, sh = 7; i < 8; i++, sh--) {
68
        if (mask & (1 << sh))
69
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
70
    }
71
}
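/* Note (added for clarity): the 32-bit CR is modelled as eight 4-bit fields,
 * crf[0] being the most significant nibble (bits 31-28 of the packed value,
 * as helper_load_cr above shows).  The mask is field-ordered the same way,
 * so e.g. helper_store_cr(0x12345678, 0x80) updates only crf[0] (to 0x1)
 * and leaves crf[1]..crf[7] untouched.
 */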
72

    
73
/*****************************************************************************/
74
/* SPR accesses */
75
void helper_load_dump_spr (uint32_t sprn)
76
{
77
    if (loglevel != 0) {
78
        fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
79
                sprn, sprn, env->spr[sprn]);
80
    }
81
}
82

    
83
void helper_store_dump_spr (uint32_t sprn)
84
{
85
    if (loglevel != 0) {
86
        fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
87
                sprn, sprn, env->spr[sprn]);
88
    }
89
}
90

    
91
target_ulong helper_load_tbl (void)
92
{
93
    return cpu_ppc_load_tbl(env);
94
}
95

    
96
target_ulong helper_load_tbu (void)
97
{
98
    return cpu_ppc_load_tbu(env);
99
}
100

    
101
target_ulong helper_load_atbl (void)
102
{
103
    return cpu_ppc_load_atbl(env);
104
}
105

    
106
target_ulong helper_load_atbu (void)
107
{
108
    return cpu_ppc_load_atbu(env);
109
}
110

    
111
target_ulong helper_load_601_rtcl (void)
112
{
113
    return cpu_ppc601_load_rtcl(env);
114
}
115

    
116
target_ulong helper_load_601_rtcu (void)
117
{
118
    return cpu_ppc601_load_rtcu(env);
119
}
120

    
121
#if !defined(CONFIG_USER_ONLY)
122
#if defined (TARGET_PPC64)
123
void helper_store_asr (target_ulong val)
124
{
125
    ppc_store_asr(env, val);
126
}
127
#endif
128

    
129
void helper_store_sdr1 (target_ulong val)
130
{
131
    ppc_store_sdr1(env, val);
132
}
133

    
134
void helper_store_tbl (target_ulong val)
135
{
136
    cpu_ppc_store_tbl(env, val);
137
}
138

    
139
void helper_store_tbu (target_ulong val)
140
{
141
    cpu_ppc_store_tbu(env, val);
142
}
143

    
144
void helper_store_atbl (target_ulong val)
145
{
146
    cpu_ppc_store_atbl(env, val);
147
}
148

    
149
void helper_store_atbu (target_ulong val)
150
{
151
    cpu_ppc_store_atbu(env, val);
152
}
153

    
154
void helper_store_601_rtcl (target_ulong val)
155
{
156
    cpu_ppc601_store_rtcl(env, val);
157
}
158

    
159
void helper_store_601_rtcu (target_ulong val)
160
{
161
    cpu_ppc601_store_rtcu(env, val);
162
}
163

    
164
target_ulong helper_load_decr (void)
165
{
166
    return cpu_ppc_load_decr(env);
167
}
168

    
169
void helper_store_decr (target_ulong val)
170
{
171
    cpu_ppc_store_decr(env, val);
172
}
173

    
174
void helper_store_hid0_601 (target_ulong val)
175
{
176
    target_ulong hid0;
177

    
178
    hid0 = env->spr[SPR_HID0];
179
    if ((val ^ hid0) & 0x00000008) {
180
        /* Change current endianness */
181
        env->hflags &= ~(1 << MSR_LE);
182
        env->hflags_nmsr &= ~(1 << MSR_LE);
183
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
184
        env->hflags |= env->hflags_nmsr;
185
        if (loglevel != 0) {
186
            fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
187
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
188
        }
189
    }
190
    env->spr[SPR_HID0] = (uint32_t)val;
191
}
192

    
193
void helper_store_403_pbr (uint32_t num, target_ulong value)
194
{
195
    if (likely(env->pb[num] != value)) {
196
        env->pb[num] = value;
197
        /* Should be optimized */
198
        tlb_flush(env, 1);
199
    }
200
}
201

    
202
target_ulong helper_load_40x_pit (void)
203
{
204
    return load_40x_pit(env);
205
}
206

    
207
void helper_store_40x_pit (target_ulong val)
208
{
209
    store_40x_pit(env, val);
210
}
211

    
212
void helper_store_40x_dbcr0 (target_ulong val)
213
{
214
    store_40x_dbcr0(env, val);
215
}
216

    
217
void helper_store_40x_sler (target_ulong val)
218
{
219
    store_40x_sler(env, val);
220
}
221

    
222
void helper_store_booke_tcr (target_ulong val)
223
{
224
    store_booke_tcr(env, val);
225
}
226

    
227
void helper_store_booke_tsr (target_ulong val)
228
{
229
    store_booke_tsr(env, val);
230
}
231

    
232
void helper_store_ibatu (uint32_t nr, target_ulong val)
233
{
234
    ppc_store_ibatu(env, nr, val);
235
}
236

    
237
void helper_store_ibatl (uint32_t nr, target_ulong val)
238
{
239
    ppc_store_ibatl(env, nr, val);
240
}
241

    
242
void helper_store_dbatu (uint32_t nr, target_ulong val)
243
{
244
    ppc_store_dbatu(env, nr, val);
245
}
246

    
247
void helper_store_dbatl (uint32_t nr, target_ulong val)
248
{
249
    ppc_store_dbatl(env, nr, val);
250
}
251

    
252
void helper_store_601_batl (uint32_t nr, target_ulong val)
253
{
254
    ppc_store_ibatl_601(env, nr, val);
255
}
256

    
257
void helper_store_601_batu (uint32_t nr, target_ulong val)
258
{
259
    ppc_store_ibatu_601(env, nr, val);
260
}
261
#endif
262

    
263
/*****************************************************************************/
264
/* Memory load and stores */
265

    
266
static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
267
{
268
#if defined(TARGET_PPC64)
269
        if (!msr_sf)
270
            return (uint32_t)(addr + arg);
271
        else
272
#endif
273
            return addr + arg;
274
}
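/* Note (added for clarity): on 64-bit CPUs running in 32-bit mode
 * (MSR[SF] = 0), effective-address arithmetic wraps modulo 2^32, so e.g.
 * addr_add(0xFFFFFFFC, 8) yields 0x00000004.  In 64-bit mode, and on
 * 32-bit-only targets, the plain sum is returned.
 */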
275

    
276
void helper_lmw (target_ulong addr, uint32_t reg)
277
{
278
    for (; reg < 32; reg++) {
279
        if (msr_le)
280
            env->gpr[reg] = bswap32(ldl(addr));
281
        else
282
            env->gpr[reg] = ldl(addr);
283
        addr = addr_add(addr, 4);
284
    }
285
}
286

    
287
void helper_stmw (target_ulong addr, uint32_t reg)
288
{
289
    for (; reg < 32; reg++) {
290
        if (msr_le)
291
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
292
        else
293
            stl(addr, (uint32_t)env->gpr[reg]);
294
        addr = addr_add(addr, 4);
295
    }
296
}
297

    
298
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
299
{
300
    int sh;
301
    for (; nb > 3; nb -= 4) {
302
        env->gpr[reg] = ldl(addr);
303
        reg = (reg + 1) % 32;
304
        addr = addr_add(addr, 4);
305
    }
306
    if (unlikely(nb > 0)) {
307
        env->gpr[reg] = 0;
308
        for (sh = 24; nb > 0; nb--, sh -= 8) {
309
            env->gpr[reg] |= ldub(addr) << sh;
310
            addr = addr_add(addr, 1);
311
        }
312
    }
313
}
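/* Note (added for clarity): lswi/lswx load nb bytes into consecutive GPRs,
 * wrapping from r31 back to r0.  A final partial word (1-3 bytes) is placed
 * left-justified in the last register with the remaining low-order bytes
 * cleared: e.g. nb = 6 starting at r30 fills r30 with the first word and
 * puts the last two bytes in the upper half of r31, zeroing its lower half.
 */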
314
/* PPC32 specification says we must generate an exception if
315
 * rA is in the range of registers to be loaded.
316
 * On the other hand, IBM says this is valid, but rA won't be loaded.
317
 * For now, I'll follow the spec...
318
 */
319
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
320
{
321
    if (likely(xer_bc != 0)) {
322
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
323
                     (reg < rb && (reg + xer_bc) > rb))) {
324
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
325
                                       POWERPC_EXCP_INVAL |
326
                                       POWERPC_EXCP_INVAL_LSWX);
327
        } else {
328
            helper_lsw(addr, xer_bc, reg);
329
        }
330
    }
331
}
332

    
333
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
334
{
335
    int sh;
336
    for (; nb > 3; nb -= 4) {
337
        stl(addr, env->gpr[reg]);
338
        reg = (reg + 1) % 32;
339
        addr = addr_add(addr, 4);
340
    }
341
    if (unlikely(nb > 0)) {
342
        for (sh = 24; nb > 0; nb--, sh -= 8) {
343
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
344
            addr = addr_add(addr, 1);
345
        }
346
    }
347
}
348

    
349
static void do_dcbz(target_ulong addr, int dcache_line_size)
350
{
351
    addr &= ~(dcache_line_size - 1);
352
    int i;
353
    for (i = 0 ; i < dcache_line_size ; i += 4) {
354
        stl(addr + i , 0);
355
    }
356
    if (env->reserve == addr)
357
        env->reserve = (target_ulong)-1ULL;
358
}
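/* Note (added for clarity): dcbz clears a whole data cache line.  The
 * effective address is rounded down to a line boundary, the line is zeroed
 * one word at a time, and a lwarx/stwcx. reservation is cancelled if it was
 * made on that line's base address.  With a 32-byte line, for instance, an
 * access to 0x1015 clears 0x1000..0x101F.
 */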
359

    
360
void helper_dcbz(target_ulong addr)
361
{
362
    do_dcbz(addr, env->dcache_line_size);
363
}
364

    
365
void helper_dcbz_970(target_ulong addr)
366
{
367
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
368
        do_dcbz(addr, 32);
369
    else
370
        do_dcbz(addr, env->dcache_line_size);
371
}
372

    
373
void helper_icbi(target_ulong addr)
374
{
375
    uint32_t tmp;
376

    
377
    addr &= ~(env->dcache_line_size - 1);
378
    /* Invalidate one cache line:
379
     * PowerPC specification says this is to be treated like a load
380
     * (not a fetch) by the MMU. To make sure that happens,
381
     * do the load "by hand".
382
     */
383
    tmp = ldl(addr);
384
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
385
}
386

    
387
// XXX: to be tested
388
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
389
{
390
    int i, c, d;
391
    d = 24;
392
    for (i = 0; i < xer_bc; i++) {
393
        c = ldub(addr);
394
        addr = addr_add(addr, 1);
395
        /* ra (if not 0) and rb are never modified */
396
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
397
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
398
        }
399
        if (unlikely(c == xer_cmp))
400
            break;
401
        if (likely(d != 0)) {
402
            d -= 8;
403
        } else {
404
            d = 24;
405
            reg++;
406
            reg = reg & 0x1F;
407
        }
408
    }
409
    return i;
410
}
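/* Note (added for clarity): lscbx loads up to XER[bc] bytes into successive
 * registers (wrapping at r31, and skipping ra/rb as destinations), packing
 * four bytes per register from the most significant byte down.  Loading
 * stops once a byte equal to the XER compare field has been transferred;
 * the helper returns the number of bytes actually moved.
 */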
411

    
412
/*****************************************************************************/
413
/* Fixed point operations helpers */
414
#if defined(TARGET_PPC64)
415

    
416
/* multiply high word */
417
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
418
{
419
    uint64_t tl, th;
420

    
421
    muls64(&tl, &th, arg1, arg2);
422
    return th;
423
}
424

    
425
/* multiply high word unsigned */
426
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
427
{
428
    uint64_t tl, th;
429

    
430
    mulu64(&tl, &th, arg1, arg2);
431
    return th;
432
}
433

    
434
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
435
{
436
    int64_t th;
437
    uint64_t tl;
438

    
439
    muls64(&tl, (uint64_t *)&th, arg1, arg2);
440
    /* If th != 0 && th != -1, then we had an overflow */
441
    if (likely((uint64_t)(th + 1) <= 1)) {
442
        env->xer &= ~(1 << XER_OV);
443
    } else {
444
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
445
    }
446
    return (int64_t)tl;
447
}
448
#endif
449

    
450
target_ulong helper_cntlzw (target_ulong t)
451
{
452
    return clz32(t);
453
}
454

    
455
#if defined(TARGET_PPC64)
456
target_ulong helper_cntlzd (target_ulong t)
457
{
458
    return clz64(t);
459
}
460
#endif
461

    
462
/* shift right arithmetic helper */
463
target_ulong helper_sraw (target_ulong value, target_ulong shift)
464
{
465
    int32_t ret;
466

    
467
    if (likely(!(shift & 0x20))) {
468
        if (likely((uint32_t)shift != 0)) {
469
            shift &= 0x1f;
470
            ret = (int32_t)value >> shift;
471
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
472
                env->xer &= ~(1 << XER_CA);
473
            } else {
474
                env->xer |= (1 << XER_CA);
475
            }
476
        } else {
477
            ret = (int32_t)value;
478
            env->xer &= ~(1 << XER_CA);
479
        }
480
    } else {
481
        ret = (int32_t)value >> 31;
482
        if (ret) {
483
            env->xer |= (1 << XER_CA);
484
        } else {
485
            env->xer &= ~(1 << XER_CA);
486
        }
487
    }
488
    return (target_long)ret;
489
}
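/* Note (added for clarity): for sraw, XER[CA] is set only when the source
 * is negative and non-zero bits were shifted out (i.e. the result is not
 * exact).  E.g. sraw(-11, 2) gives -3 with CA = 1, while sraw(-12, 2) gives
 * -3 with CA = 0.  Shift amounts of 32..63 reduce to a pure sign fill.
 */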
490

    
491
#if defined(TARGET_PPC64)
492
target_ulong helper_srad (target_ulong value, target_ulong shift)
493
{
494
    int64_t ret;
495

    
496
    if (likely(!(shift & 0x40))) {
497
        if (likely((uint64_t)shift != 0)) {
498
            shift &= 0x3f;
499
            ret = (int64_t)value >> shift;
500
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
501
                env->xer &= ~(1 << XER_CA);
502
            } else {
503
                env->xer |= (1 << XER_CA);
504
            }
505
        } else {
506
            ret = (int64_t)value;
507
            env->xer &= ~(1 << XER_CA);
508
        }
509
    } else {
510
        ret = (int64_t)value >> 63;
511
        if (ret) {
512
            env->xer |= (1 << XER_CA);
513
        } else {
514
            env->xer &= ~(1 << XER_CA);
515
        }
516
    }
517
    return ret;
518
}
519
#endif
520

    
521
target_ulong helper_popcntb (target_ulong val)
522
{
523
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
524
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
525
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
526
    return val;
527
}
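/* Note (added for clarity): this is the usual SWAR population count.  Each
 * step adds neighbouring 1-, 2- and then 4-bit counts in parallel, so after
 * the third step every byte of val holds the number of 1 bits of the
 * matching input byte - exactly what popcntb is defined to return (no
 * horizontal sum across bytes is wanted).  E.g. an input byte of 0xB5
 * produces 0x05.
 */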
528

    
529
#if defined(TARGET_PPC64)
530
target_ulong helper_popcntb_64 (target_ulong val)
531
{
532
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
533
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
534
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
535
    return val;
536
}
537
#endif
538

    
539
/*****************************************************************************/
540
/* Floating point operations helpers */
541
uint64_t helper_float32_to_float64(uint32_t arg)
542
{
543
    CPU_FloatU f;
544
    CPU_DoubleU d;
545
    f.l = arg;
546
    d.d = float32_to_float64(f.f, &env->fp_status);
547
    return d.ll;
548
}
549

    
550
uint32_t helper_float64_to_float32(uint64_t arg)
551
{
552
    CPU_FloatU f;
553
    CPU_DoubleU d;
554
    d.ll = arg;
555
    f.f = float64_to_float32(d.d, &env->fp_status);
556
    return f.l;
557
}
558

    
559
static always_inline int isden (float64 d)
560
{
561
    CPU_DoubleU u;
562

    
563
    u.d = d;
564

    
565
    return ((u.ll >> 52) & 0x7FF) == 0;
566
}
567

    
568
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
569
{
570
    CPU_DoubleU farg;
571
    int isneg;
572
    int ret;
573
    farg.ll = arg;
574
    isneg = float64_is_neg(farg.d);
575
    if (unlikely(float64_is_nan(farg.d))) {
576
        if (float64_is_signaling_nan(farg.d)) {
577
            /* Signaling NaN: flags are undefined */
578
            ret = 0x00;
579
        } else {
580
            /* Quiet NaN */
581
            ret = 0x11;
582
        }
583
    } else if (unlikely(float64_is_infinity(farg.d))) {
584
        /* +/- infinity */
585
        if (isneg)
586
            ret = 0x09;
587
        else
588
            ret = 0x05;
589
    } else {
590
        if (float64_is_zero(farg.d)) {
591
            /* +/- zero */
592
            if (isneg)
593
                ret = 0x12;
594
            else
595
                ret = 0x02;
596
        } else {
597
            if (isden(farg.d)) {
598
                /* Denormalized numbers */
599
                ret = 0x10;
600
            } else {
601
                /* Normalized numbers */
602
                ret = 0x00;
603
            }
604
            if (isneg) {
605
                ret |= 0x08;
606
            } else {
607
                ret |= 0x04;
608
            }
609
        }
610
    }
611
    if (set_fprf) {
612
        /* We update FPSCR_FPRF */
613
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
614
        env->fpscr |= ret << FPSCR_FPRF;
615
    }
616
    /* We just need fpcc to update Rc1 */
617
    return ret & 0xF;
618
}
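/* Note (added for clarity): the 5-bit FPRF values produced above follow the
 * architected class/FPCC encoding:
 *   0x11 quiet NaN      0x09 -infinity   0x05 +infinity
 *   0x12 -zero          0x02 +zero
 *   0x18 -denormal      0x14 +denormal   0x08 -normal   0x04 +normal
 * The full value is written into FPSCR[FPRF] when set_fprf is non-zero;
 * only the low four bits are returned to the caller.
 */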
619

    
620
/* Floating-point invalid operations exception */
621
static always_inline uint64_t fload_invalid_op_excp (int op)
622
{
623
    uint64_t ret = 0;
624
    int ve;
625

    
626
    ve = fpscr_ve;
627
    switch (op) {
628
    case POWERPC_EXCP_FP_VXSNAN:
629
        env->fpscr |= 1 << FPSCR_VXSNAN;
630
        break;
631
    case POWERPC_EXCP_FP_VXSOFT:
632
        env->fpscr |= 1 << FPSCR_VXSOFT;
633
        break;
634
    case POWERPC_EXCP_FP_VXISI:
635
        /* Magnitude subtraction of infinities */
636
        env->fpscr |= 1 << FPSCR_VXISI;
637
        goto update_arith;
638
    case POWERPC_EXCP_FP_VXIDI:
639
        /* Division of infinity by infinity */
640
        env->fpscr |= 1 << FPSCR_VXIDI;
641
        goto update_arith;
642
    case POWERPC_EXCP_FP_VXZDZ:
643
        /* Division of zero by zero */
644
        env->fpscr |= 1 << FPSCR_VXZDZ;
645
        goto update_arith;
646
    case POWERPC_EXCP_FP_VXIMZ:
647
        /* Multiplication of zero by infinity */
648
        env->fpscr |= 1 << FPSCR_VXIMZ;
649
        goto update_arith;
650
    case POWERPC_EXCP_FP_VXVC:
651
        /* Ordered comparison of NaN */
652
        env->fpscr |= 1 << FPSCR_VXVC;
653
        env->fpscr &= ~(0xF << FPSCR_FPCC);
654
        env->fpscr |= 0x11 << FPSCR_FPCC;
655
        /* We must update the target FPR before raising the exception */
656
        if (ve != 0) {
657
            env->exception_index = POWERPC_EXCP_PROGRAM;
658
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
659
            /* Update the floating-point enabled exception summary */
660
            env->fpscr |= 1 << FPSCR_FEX;
661
            /* Exception is deferred */
662
            ve = 0;
663
        }
664
        break;
665
    case POWERPC_EXCP_FP_VXSQRT:
666
        /* Square root of a negative number */
667
        env->fpscr |= 1 << FPSCR_VXSQRT;
668
    update_arith:
669
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
670
        if (ve == 0) {
671
            /* Set the result to quiet NaN */
672
            ret = 0xFFF8000000000000ULL;
673
            env->fpscr &= ~(0xF << FPSCR_FPCC);
674
            env->fpscr |= 0x11 << FPSCR_FPCC;
675
        }
676
        break;
677
    case POWERPC_EXCP_FP_VXCVI:
678
        /* Invalid conversion */
679
        env->fpscr |= 1 << FPSCR_VXCVI;
680
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
681
        if (ve == 0) {
682
            /* Set the result to quiet NaN */
683
            ret = 0xFFF8000000000000ULL;
684
            env->fpscr &= ~(0xF << FPSCR_FPCC);
685
            env->fpscr |= 0x11 << FPSCR_FPCC;
686
        }
687
        break;
688
    }
689
    /* Update the floating-point invalid operation summary */
690
    env->fpscr |= 1 << FPSCR_VX;
691
    /* Update the floating-point exception summary */
692
    env->fpscr |= 1 << FPSCR_FX;
693
    if (ve != 0) {
694
        /* Update the floating-point enabled exception summary */
695
        env->fpscr |= 1 << FPSCR_FEX;
696
        if (msr_fe0 != 0 || msr_fe1 != 0)
697
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
698
    }
699
    return ret;
700
}
701

    
702
static always_inline void float_zero_divide_excp (void)
703
{
704
    env->fpscr |= 1 << FPSCR_ZX;
705
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
706
    /* Update the floating-point exception summary */
707
    env->fpscr |= 1 << FPSCR_FX;
708
    if (fpscr_ze != 0) {
709
        /* Update the floating-point enabled exception summary */
710
        env->fpscr |= 1 << FPSCR_FEX;
711
        if (msr_fe0 != 0 || msr_fe1 != 0) {
712
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
713
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
714
        }
715
    }
716
}
717

    
718
static always_inline void float_overflow_excp (void)
719
{
720
    env->fpscr |= 1 << FPSCR_OX;
721
    /* Update the floating-point exception summary */
722
    env->fpscr |= 1 << FPSCR_FX;
723
    if (fpscr_oe != 0) {
724
        /* XXX: should adjust the result */
725
        /* Update the floating-point enabled exception summary */
726
        env->fpscr |= 1 << FPSCR_FEX;
727
        /* We must update the target FPR before raising the exception */
728
        env->exception_index = POWERPC_EXCP_PROGRAM;
729
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
730
    } else {
731
        env->fpscr |= 1 << FPSCR_XX;
732
        env->fpscr |= 1 << FPSCR_FI;
733
    }
734
}
735

    
736
static always_inline void float_underflow_excp (void)
737
{
738
    env->fpscr |= 1 << FPSCR_UX;
739
    /* Update the floating-point exception summary */
740
    env->fpscr |= 1 << FPSCR_FX;
741
    if (fpscr_ue != 0) {
742
        /* XXX: should adjust the result */
743
        /* Update the floating-point enabled exception summary */
744
        env->fpscr |= 1 << FPSCR_FEX;
745
        /* We must update the target FPR before raising the exception */
746
        env->exception_index = POWERPC_EXCP_PROGRAM;
747
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
748
    }
749
}
750

    
751
static always_inline void float_inexact_excp (void)
752
{
753
    env->fpscr |= 1 << FPSCR_XX;
754
    /* Update the floating-point exception summary */
755
    env->fpscr |= 1 << FPSCR_FX;
756
    if (fpscr_xe != 0) {
757
        /* Update the floating-point enabled exception summary */
758
        env->fpscr |= 1 << FPSCR_FEX;
759
        /* We must update the target FPR before raising the exception */
760
        env->exception_index = POWERPC_EXCP_PROGRAM;
761
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
762
    }
763
}
764

    
765
static always_inline void fpscr_set_rounding_mode (void)
766
{
767
    int rnd_type;
768

    
769
    /* Set rounding mode */
770
    switch (fpscr_rn) {
771
    case 0:
772
        /* Best approximation (round to nearest) */
773
        rnd_type = float_round_nearest_even;
774
        break;
775
    case 1:
776
        /* Smaller magnitude (round toward zero) */
777
        rnd_type = float_round_to_zero;
778
        break;
779
    case 2:
780
        /* Round toward +infinity */
781
        rnd_type = float_round_up;
782
        break;
783
    default:
784
    case 3:
785
        /* Round toward -infinity */
786
        rnd_type = float_round_down;
787
        break;
788
    }
789
    set_float_rounding_mode(rnd_type, &env->fp_status);
790
}
791

    
792
void helper_fpscr_clrbit (uint32_t bit)
793
{
794
    int prev;
795

    
796
    prev = (env->fpscr >> bit) & 1;
797
    env->fpscr &= ~(1 << bit);
798
    if (prev == 1) {
799
        switch (bit) {
800
        case FPSCR_RN1:
801
        case FPSCR_RN:
802
            fpscr_set_rounding_mode();
803
            break;
804
        default:
805
            break;
806
        }
807
    }
808
}
809

    
810
void helper_fpscr_setbit (uint32_t bit)
811
{
812
    int prev;
813

    
814
    prev = (env->fpscr >> bit) & 1;
815
    env->fpscr |= 1 << bit;
816
    if (prev == 0) {
817
        switch (bit) {
818
        case FPSCR_VX:
819
            env->fpscr |= 1 << FPSCR_FX;
820
            if (fpscr_ve)
821
                goto raise_ve;
822
        case FPSCR_OX:
823
            env->fpscr |= 1 << FPSCR_FX;
824
            if (fpscr_oe)
825
                goto raise_oe;
826
            break;
827
        case FPSCR_UX:
828
            env->fpscr |= 1 << FPSCR_FX;
829
            if (fpscr_ue)
830
                goto raise_ue;
831
            break;
832
        case FPSCR_ZX:
833
            env->fpscr |= 1 << FPSCR_FX;
834
            if (fpscr_ze)
835
                goto raise_ze;
836
            break;
837
        case FPSCR_XX:
838
            env->fpscr |= 1 << FPSCR_FX;
839
            if (fpscr_xe)
840
                goto raise_xe;
841
            break;
842
        case FPSCR_VXSNAN:
843
        case FPSCR_VXISI:
844
        case FPSCR_VXIDI:
845
        case FPSCR_VXZDZ:
846
        case FPSCR_VXIMZ:
847
        case FPSCR_VXVC:
848
        case FPSCR_VXSOFT:
849
        case FPSCR_VXSQRT:
850
        case FPSCR_VXCVI:
851
            env->fpscr |= 1 << FPSCR_VX;
852
            env->fpscr |= 1 << FPSCR_FX;
853
            if (fpscr_ve != 0)
854
                goto raise_ve;
855
            break;
856
        case FPSCR_VE:
857
            if (fpscr_vx != 0) {
858
            raise_ve:
859
                env->error_code = POWERPC_EXCP_FP;
860
                if (fpscr_vxsnan)
861
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
862
                if (fpscr_vxisi)
863
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
864
                if (fpscr_vxidi)
865
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
866
                if (fpscr_vxzdz)
867
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
868
                if (fpscr_vximz)
869
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
870
                if (fpscr_vxvc)
871
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
872
                if (fpscr_vxsoft)
873
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
874
                if (fpscr_vxsqrt)
875
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
876
                if (fpscr_vxcvi)
877
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
878
                goto raise_excp;
879
            }
880
            break;
881
        case FPSCR_OE:
882
            if (fpscr_ox != 0) {
883
            raise_oe:
884
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
885
                goto raise_excp;
886
            }
887
            break;
888
        case FPSCR_UE:
889
            if (fpscr_ux != 0) {
890
            raise_ue:
891
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
892
                goto raise_excp;
893
            }
894
            break;
895
        case FPSCR_ZE:
896
            if (fpscr_zx != 0) {
897
            raise_ze:
898
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
899
                goto raise_excp;
900
            }
901
            break;
902
        case FPSCR_XE:
903
            if (fpscr_xx != 0) {
904
            raise_xe:
905
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
906
                goto raise_excp;
907
            }
908
            break;
909
        case FPSCR_RN1:
910
        case FPSCR_RN:
911
            fpscr_set_rounding_mode();
912
            break;
913
        default:
914
            break;
915
        raise_excp:
916
            /* Update the floating-point enabled exception summary */
917
            env->fpscr |= 1 << FPSCR_FEX;
918
            /* We have to update Rc1 before raising the exception */
919
            env->exception_index = POWERPC_EXCP_PROGRAM;
920
            break;
921
        }
922
    }
923
}
924

    
925
void helper_store_fpscr (uint64_t arg, uint32_t mask)
926
{
927
    /*
928
     * We use only the 32 LSBs of the incoming FPR
929
     */
930
    uint32_t prev, new;
931
    int i;
932

    
933
    prev = env->fpscr;
934
    new = (uint32_t)arg;
935
    new &= ~0x60000000;
936
    new |= prev & 0x60000000;
937
    for (i = 0; i < 8; i++) {
938
        if (mask & (1 << i)) {
939
            env->fpscr &= ~(0xF << (4 * i));
940
            env->fpscr |= new & (0xF << (4 * i));
941
        }
942
    }
943
    /* Update VX and FEX */
944
    if (fpscr_ix != 0)
945
        env->fpscr |= 1 << FPSCR_VX;
946
    else
947
        env->fpscr &= ~(1 << FPSCR_VX);
948
    if ((fpscr_ex & fpscr_eex) != 0) {
949
        env->fpscr |= 1 << FPSCR_FEX;
950
        env->exception_index = POWERPC_EXCP_PROGRAM;
951
        /* XXX: we should compute it properly */
952
        env->error_code = POWERPC_EXCP_FP;
953
    }
954
    else
955
        env->fpscr &= ~(1 << FPSCR_FEX);
956
    fpscr_set_rounding_mode();
957
}
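/* Note (added for clarity): each set bit i of mask selects the 4-bit FPSCR
 * field at bits 4*i..4*i+3 of the incoming value.  FEX and VX (mask
 * 0x60000000) are never taken from the source: they are summary bits and
 * are recomputed below from the individual exception and enable bits.
 */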
958

    
959
void helper_float_check_status (void)
960
{
961
#ifdef CONFIG_SOFTFLOAT
962
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
963
        (env->error_code & POWERPC_EXCP_FP)) {
964
        /* Deferred floating-point exception after target FPR update */
965
        if (msr_fe0 != 0 || msr_fe1 != 0)
966
            helper_raise_exception_err(env->exception_index, env->error_code);
967
    } else {
968
        int status = get_float_exception_flags(&env->fp_status);
969
        if (status & float_flag_divbyzero) {
970
            float_zero_divide_excp();
971
        } else if (status & float_flag_overflow) {
972
            float_overflow_excp();
973
        } else if (status & float_flag_underflow) {
974
            float_underflow_excp();
975
        } else if (status & float_flag_inexact) {
976
            float_inexact_excp();
977
        }
978
    }
979
#else
980
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
981
        (env->error_code & POWERPC_EXCP_FP)) {
982
        /* Deferred floating-point exception after target FPR update */
983
        if (msr_fe0 != 0 || msr_fe1 != 0)
984
            helper_raise_exception_err(env->exception_index, env->error_code);
985
    }
986
#endif
987
}
988

    
989
#ifdef CONFIG_SOFTFLOAT
990
void helper_reset_fpstatus (void)
991
{
992
    set_float_exception_flags(0, &env->fp_status);
993
}
994
#endif
995

    
996
/* fadd - fadd. */
997
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
998
{
999
    CPU_DoubleU farg1, farg2;
1000

    
1001
    farg1.ll = arg1;
1002
    farg2.ll = arg2;
1003
#if USE_PRECISE_EMULATION
1004
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1005
                 float64_is_signaling_nan(farg2.d))) {
1006
        /* sNaN addition */
1007
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1008
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1009
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
1010
        /* Magnitude subtraction of infinities */
1011
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1012
    } else {
1013
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1014
    }
1015
#else
1016
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1017
#endif
1018
    return farg1.ll;
1019
}
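/* Note (added for clarity): with USE_PRECISE_EMULATION the helper screens
 * the architecturally invalid cases itself - an sNaN operand and the
 * "magnitude subtraction of infinities" (inf + -inf) - so that the proper
 * FPSCR VX* bit is set and the default quiet NaN is produced before
 * softfloat is invoked.  The same pattern is used by fsub/fmul/fdiv below.
 */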
1020

    
1021
/* fsub - fsub. */
1022
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1023
{
1024
    CPU_DoubleU farg1, farg2;
1025

    
1026
    farg1.ll = arg1;
1027
    farg2.ll = arg2;
1028
#if USE_PRECISE_EMULATION
1029
{
1030
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1031
                 float64_is_signaling_nan(farg2.d))) {
1032
        /* sNaN subtraction */
1033
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1034
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1035
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1036
        /* Magnitude subtraction of infinities */
1037
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1038
    } else {
1039
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1040
    }
1041
}
1042
#else
1043
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1044
#endif
1045
    return farg1.ll;
1046
}
1047

    
1048
/* fmul - fmul. */
1049
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1050
{
1051
    CPU_DoubleU farg1, farg2;
1052

    
1053
    farg1.ll = arg1;
1054
    farg2.ll = arg2;
1055
#if USE_PRECISE_EMULATION
1056
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1057
                 float64_is_signaling_nan(farg2.d))) {
1058
        /* sNaN multiplication */
1059
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1060
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1061
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1062
        /* Multiplication of zero by infinity */
1063
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1064
    } else {
1065
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1066
    }
1067
#else
1068
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1069
#endif
1070
    return farg1.ll;
1071
}
1072

    
1073
/* fdiv - fdiv. */
1074
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1075
{
1076
    CPU_DoubleU farg1, farg2;
1077

    
1078
    farg1.ll = arg1;
1079
    farg2.ll = arg2;
1080
#if USE_PRECISE_EMULATION
1081
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1082
                 float64_is_signaling_nan(farg2.d))) {
1083
        /* sNaN division */
1084
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1085
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1086
        /* Division of infinity by infinity */
1087
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1088
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1089
        /* Division of zero by zero */
1090
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1091
    } else {
1092
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1093
    }
1094
#else
1095
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1096
#endif
1097
    return farg1.ll;
1098
}
1099

    
1100
/* fabs */
1101
uint64_t helper_fabs (uint64_t arg)
1102
{
1103
    CPU_DoubleU farg;
1104

    
1105
    farg.ll = arg;
1106
    farg.d = float64_abs(farg.d);
1107
    return farg.ll;
1108
}
1109

    
1110
/* fnabs */
1111
uint64_t helper_fnabs (uint64_t arg)
1112
{
1113
    CPU_DoubleU farg;
1114

    
1115
    farg.ll = arg;
1116
    farg.d = float64_abs(farg.d);
1117
    farg.d = float64_chs(farg.d);
1118
    return farg.ll;
1119
}
1120

    
1121
/* fneg */
1122
uint64_t helper_fneg (uint64_t arg)
1123
{
1124
    CPU_DoubleU farg;
1125

    
1126
    farg.ll = arg;
1127
    farg.d = float64_chs(farg.d);
1128
    return farg.ll;
1129
}
1130

    
1131
/* fctiw - fctiw. */
1132
uint64_t helper_fctiw (uint64_t arg)
1133
{
1134
    CPU_DoubleU farg;
1135
    farg.ll = arg;
1136

    
1137
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1138
        /* sNaN conversion */
1139
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1140
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1141
        /* qNaN / infinity conversion */
1142
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1143
    } else {
1144
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
1145
#if USE_PRECISE_EMULATION
1146
        /* XXX: higher bits are not supposed to be significant.
1147
         *     to make tests easier, return the same as a real PowerPC 750
1148
         */
1149
        farg.ll |= 0xFFF80000ULL << 32;
1150
#endif
1151
    }
1152
    return farg.ll;
1153
}
1154

    
1155
/* fctiwz - fctiwz. */
1156
uint64_t helper_fctiwz (uint64_t arg)
1157
{
1158
    CPU_DoubleU farg;
1159
    farg.ll = arg;
1160

    
1161
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1162
        /* sNaN conversion */
1163
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1164
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1165
        /* qNaN / infinity conversion */
1166
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1167
    } else {
1168
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1169
#if USE_PRECISE_EMULATION
1170
        /* XXX: higher bits are not supposed to be significant.
1171
         *     to make tests easier, return the same as a real PowerPC 750
1172
         */
1173
        farg.ll |= 0xFFF80000ULL << 32;
1174
#endif
1175
    }
1176
    return farg.ll;
1177
}
1178

    
1179
#if defined(TARGET_PPC64)
1180
/* fcfid - fcfid. */
1181
uint64_t helper_fcfid (uint64_t arg)
1182
{
1183
    CPU_DoubleU farg;
1184
    farg.d = int64_to_float64(arg, &env->fp_status);
1185
    return farg.ll;
1186
}
1187

    
1188
/* fctid - fctid. */
1189
uint64_t helper_fctid (uint64_t arg)
1190
{
1191
    CPU_DoubleU farg;
1192
    farg.ll = arg;
1193

    
1194
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1195
        /* sNaN conversion */
1196
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1197
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1198
        /* qNan / infinity conversion */
1199
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1200
    } else {
1201
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
1202
    }
1203
    return farg.ll;
1204
}
1205

    
1206
/* fctidz - fctidz. */
1207
uint64_t helper_fctidz (uint64_t arg)
1208
{
1209
    CPU_DoubleU farg;
1210
    farg.ll = arg;
1211

    
1212
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1213
        /* sNaN conversion */
1214
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1215
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1216
        /* qNan / infinity conversion */
1217
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1218
    } else {
1219
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1220
    }
1221
    return farg.ll;
1222
}
1223

    
1224
#endif
1225

    
1226
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1227
{
1228
    CPU_DoubleU farg;
1229
    farg.ll = arg;
1230

    
1231
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1232
        /* sNaN round */
1233
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1234
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1235
        /* qNaN / infinity round */
1236
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1237
    } else {
1238
        set_float_rounding_mode(rounding_mode, &env->fp_status);
1239
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1240
        /* Restore rounding mode from FPSCR */
1241
        fpscr_set_rounding_mode();
1242
    }
1243
    return farg.ll;
1244
}
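/* Note (added for clarity): frin/friz/frip/frim round to an integral value
 * that stays in double format.  The helper temporarily forces the requested
 * softfloat rounding mode, rounds, then restores the mode selected by
 * FPSCR[RN].  E.g. friz(2.9) returns 2.0 and frin(2.5) returns 2.0
 * (round-to-nearest-even).
 */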
1245

    
1246
uint64_t helper_frin (uint64_t arg)
1247
{
1248
    return do_fri(arg, float_round_nearest_even);
1249
}
1250

    
1251
uint64_t helper_friz (uint64_t arg)
1252
{
1253
    return do_fri(arg, float_round_to_zero);
1254
}
1255

    
1256
uint64_t helper_frip (uint64_t arg)
1257
{
1258
    return do_fri(arg, float_round_up);
1259
}
1260

    
1261
uint64_t helper_frim (uint64_t arg)
1262
{
1263
    return do_fri(arg, float_round_down);
1264
}
1265

    
1266
/* fmadd - fmadd. */
1267
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1268
{
1269
    CPU_DoubleU farg1, farg2, farg3;
1270

    
1271
    farg1.ll = arg1;
1272
    farg2.ll = arg2;
1273
    farg3.ll = arg3;
1274
#if USE_PRECISE_EMULATION
1275
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1276
                 float64_is_signaling_nan(farg2.d) ||
1277
                 float64_is_signaling_nan(farg3.d))) {
1278
        /* sNaN operation */
1279
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1280
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1281
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1282
        /* Multiplication of zero by infinity */
1283
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1284
    } else {
1285
#ifdef FLOAT128
1286
        /* This is the way the PowerPC specification defines it */
1287
        float128 ft0_128, ft1_128;
1288

    
1289
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1290
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1291
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1292
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1293
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1294
            /* Magnitude subtraction of infinities */
1295
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1296
        } else {
1297
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1298
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1299
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1300
        }
1301
#else
1302
        /* This is OK on x86 hosts */
1303
        farg1.d = (farg1.d * farg2.d) + farg3.d;
1304
#endif
1305
    }
1306
#else
1307
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1308
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1309
#endif
1310
    return farg1.ll;
1311
}
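/* Note (added for clarity): the FLOAT128 path keeps the intermediate
 * product in 128-bit precision so that the multiply-add is rounded only
 * once, as the architecture requires for fused operations.  The fallback
 * paths (host double arithmetic, or separate float64_mul/float64_add)
 * round twice and can differ from a true fused result in the last place.
 */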
1312

    
1313
/* fmsub - fmsub. */
1314
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1315
{
1316
    CPU_DoubleU farg1, farg2, farg3;
1317

    
1318
    farg1.ll = arg1;
1319
    farg2.ll = arg2;
1320
    farg3.ll = arg3;
1321
#if USE_PRECISE_EMULATION
1322
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1323
                 float64_is_signaling_nan(farg2.d) ||
1324
                 float64_is_signaling_nan(farg3.d))) {
1325
        /* sNaN operation */
1326
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1327
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1328
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1329
        /* Multiplication of zero by infinity */
1330
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1331
    } else {
1332
#ifdef FLOAT128
1333
        /* This is the way the PowerPC specification defines it */
1334
        float128 ft0_128, ft1_128;
1335

    
1336
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1337
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1338
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1339
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1340
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1341
            /* Magnitude subtraction of infinities */
1342
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1343
        } else {
1344
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1345
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1346
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1347
        }
1348
#else
1349
        /* This is OK on x86 hosts */
1350
        farg1.d = (farg1.d * farg2.d) - farg3.d;
1351
#endif
1352
    }
1353
#else
1354
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1355
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1356
#endif
1357
    return farg1.ll;
1358
}
1359

    
1360
/* fnmadd - fnmadd. */
1361
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1362
{
1363
    CPU_DoubleU farg1, farg2, farg3;
1364

    
1365
    farg1.ll = arg1;
1366
    farg2.ll = arg2;
1367
    farg3.ll = arg3;
1368

    
1369
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1370
                 float64_is_signaling_nan(farg2.d) ||
1371
                 float64_is_signaling_nan(farg3.d))) {
1372
        /* sNaN operation */
1373
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1374
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1375
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1376
        /* Multiplication of zero by infinity */
1377
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1378
    } else {
1379
#if USE_PRECISE_EMULATION
1380
#ifdef FLOAT128
1381
        /* This is the way the PowerPC specification defines it */
1382
        float128 ft0_128, ft1_128;
1383

    
1384
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1385
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1386
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1387
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1388
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1389
            /* Magnitude subtraction of infinities */
1390
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1391
        } else {
1392
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1393
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1394
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1395
        }
1396
#else
1397
        /* This is OK on x86 hosts */
1398
        farg1.d = (farg1.d * farg2.d) + farg3.d;
1399
#endif
1400
#else
1401
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1402
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1403
#endif
1404
        if (likely(!float64_is_nan(farg1.d)))
1405
            farg1.d = float64_chs(farg1.d);
1406
    }
1407
    return farg1.ll;
1408
}
1409

    
1410
/* fnmsub - fnmsub. */
1411
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1412
{
1413
    CPU_DoubleU farg1, farg2, farg3;
1414

    
1415
    farg1.ll = arg1;
1416
    farg2.ll = arg2;
1417
    farg3.ll = arg3;
1418

    
1419
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1420
                 float64_is_signaling_nan(farg2.d) ||
1421
                 float64_is_signaling_nan(farg3.d))) {
1422
        /* sNaN operation */
1423
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1424
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1425
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1426
        /* Multiplication of zero by infinity */
1427
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1428
    } else {
1429
#if USE_PRECISE_EMULATION
1430
#ifdef FLOAT128
1431
        /* This is the way the PowerPC specification defines it */
1432
        float128 ft0_128, ft1_128;
1433

    
1434
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1435
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1436
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1437
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1438
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1439
            /* Magnitude subtraction of infinities */
1440
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1441
        } else {
1442
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1443
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1444
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1445
        }
1446
#else
1447
        /* This is OK on x86 hosts */
1448
        farg1.d = (farg1.d * farg2.d) - farg3.d;
1449
#endif
1450
#else
1451
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1452
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1453
#endif
1454
        if (likely(!float64_is_nan(farg1.d)))
1455
            farg1.d = float64_chs(farg1.d);
1456
    }
1457
    return farg1.ll;
1458
}
1459

    
1460
/* frsp - frsp. */
1461
uint64_t helper_frsp (uint64_t arg)
1462
{
1463
    CPU_DoubleU farg;
1464
    float32 f32;
1465
    farg.ll = arg;
1466

    
1467
#if USE_PRECISE_EMULATION
1468
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1469
        /* sNaN square root */
1470
       farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1471
    } else {
1472
        f32 = float64_to_float32(farg.d, &env->fp_status);
1473
        farg.d = float32_to_float64(f32, &env->fp_status);
1474
    }
1475
#else
1476
    f32 = float64_to_float32(farg.d, &env->fp_status);
1477
    farg.d = float32_to_float64(f32, &env->fp_status);
1478
#endif
1479
    return farg.ll;
1480
}
1481

    
1482
/* fsqrt - fsqrt. */
1483
uint64_t helper_fsqrt (uint64_t arg)
1484
{
1485
    CPU_DoubleU farg;
1486
    farg.ll = arg;
1487

    
1488
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1489
        /* sNaN square root */
1490
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1491
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1492
        /* Square root of a negative nonzero number */
1493
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1494
    } else {
1495
        farg.d = float64_sqrt(farg.d, &env->fp_status);
1496
    }
1497
    return farg.ll;
1498
}
1499

    
1500
/* fre - fre. */
1501
uint64_t helper_fre (uint64_t arg)
1502
{
1503
    CPU_DoubleU fone, farg;
1504
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1505
    farg.ll = arg;
1506

    
1507
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1508
        /* sNaN reciprocal */
1509
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1510
    } else {
1511
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1512
    }
1513
    return farg.ll;
1514
}
1515

    
1516
/* fres - fres. */
1517
uint64_t helper_fres (uint64_t arg)
1518
{
1519
    CPU_DoubleU fone, farg;
1520
    float32 f32;
1521
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1522
    farg.ll = arg;
1523

    
1524
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1525
        /* sNaN reciprocal */
1526
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1527
    } else {
1528
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1529
        f32 = float64_to_float32(farg.d, &env->fp_status);
1530
        farg.d = float32_to_float64(f32, &env->fp_status);
1531
    }
1532
    return farg.ll;
1533
}
1534

    
1535
/* frsqrte  - frsqrte. */
1536
uint64_t helper_frsqrte (uint64_t arg)
1537
{
1538
    CPU_DoubleU fone, farg;
1539
    float32 f32;
1540
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1541
    farg.ll = arg;
1542

    
1543
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1544
        /* sNaN reciprocal square root */
1545
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1546
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1547
        /* Reciprocal square root of a negative nonzero number */
1548
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1549
    } else {
1550
        farg.d = float64_sqrt(farg.d, &env->fp_status);
1551
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1552
        f32 = float64_to_float32(farg.d, &env->fp_status);
1553
        farg.d = float32_to_float64(f32, &env->fp_status);
1554
    }
1555
    return farg.ll;
1556
}
1557

    
1558
/* fsel - fsel. */
1559
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1560
{
1561
    CPU_DoubleU farg1;
1562

    
1563
    farg1.ll = arg1;
1564

    
1565
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
1566
        return arg2;
1567
    else
1568
        return arg3;
1569
}
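/* Note (added for clarity): fsel returns its second operand when the first
 * is greater than or equal to zero (including -0.0) and is not a NaN, and
 * its third operand otherwise.  The classic use is branchless selection on
 * the sign of a difference, e.g. picking between two values depending on
 * whether a >= b for ordered, non-NaN inputs.
 */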
1570

    
1571
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1572
{
1573
    CPU_DoubleU farg1, farg2;
1574
    uint32_t ret = 0;
1575
    farg1.ll = arg1;
1576
    farg2.ll = arg2;
1577

    
1578
    if (unlikely(float64_is_nan(farg1.d) ||
1579
                 float64_is_nan(farg2.d))) {
1580
        ret = 0x01UL;
1581
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1582
        ret = 0x08UL;
1583
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1584
        ret = 0x04UL;
1585
    } else {
1586
        ret = 0x02UL;
1587
    }
1588

    
1589
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1590
    env->fpscr |= ret << FPSCR_FPRF;
1591
    env->crf[crfD] = ret;
1592
    if (unlikely(ret == 0x01UL
1593
                 && (float64_is_signaling_nan(farg1.d) ||
1594
                     float64_is_signaling_nan(farg2.d)))) {
1595
        /* sNaN comparison */
1596
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1597
    }
1598
}
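/* Note (added for clarity): the 4-bit comparison result is 0x08 for "less
 * than", 0x04 for "greater than", 0x02 for "equal" and 0x01 for "unordered"
 * (at least one NaN operand).  It is written both to CR field crfD and to
 * FPSCR[FPCC]; an sNaN operand additionally raises VXSNAN (and fcmpo below
 * raises VXVC as well).
 */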
1599

    
1600
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1601
{
1602
    CPU_DoubleU farg1, farg2;
1603
    uint32_t ret = 0;
1604
    farg1.ll = arg1;
1605
    farg2.ll = arg2;
1606

    
1607
    if (unlikely(float64_is_nan(farg1.d) ||
1608
                 float64_is_nan(farg2.d))) {
1609
        ret = 0x01UL;
1610
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1611
        ret = 0x08UL;
1612
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1613
        ret = 0x04UL;
1614
    } else {
1615
        ret = 0x02UL;
1616
    }
1617

    
1618
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1619
    env->fpscr |= ret << FPSCR_FPRF;
1620
    env->crf[crfD] = ret;
1621
    if (unlikely (ret == 0x01UL)) {
1622
        if (float64_is_signaling_nan(farg1.d) ||
1623
            float64_is_signaling_nan(farg2.d)) {
1624
            /* sNaN comparison */
1625
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1626
                                  POWERPC_EXCP_FP_VXVC);
1627
        } else {
1628
            /* qNaN comparison */
1629
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1630
        }
1631
    }
1632
}
1633

    
1634
#if !defined (CONFIG_USER_ONLY)
1635
void helper_store_msr (target_ulong val)
1636
{
1637
    val = hreg_store_msr(env, val, 0);
1638
    if (val != 0) {
1639
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1640
        helper_raise_exception(val);
1641
    }
1642
}
1643

    
1644
static always_inline void do_rfi (target_ulong nip, target_ulong msr,
1645
                                    target_ulong msrm, int keep_msrh)
1646
{
1647
#if defined(TARGET_PPC64)
1648
    if (msr & (1ULL << MSR_SF)) {
1649
        nip = (uint64_t)nip;
1650
        msr &= (uint64_t)msrm;
1651
    } else {
1652
        nip = (uint32_t)nip;
1653
        msr = (uint32_t)(msr & msrm);
1654
        if (keep_msrh)
1655
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1656
    }
1657
#else
1658
    nip = (uint32_t)nip;
1659
    msr &= (uint32_t)msrm;
1660
#endif
1661
    /* XXX: beware: this is false if VLE is supported */
1662
    env->nip = nip & ~((target_ulong)0x00000003);
1663
    hreg_store_msr(env, msr, 1);
1664
#if defined (DEBUG_OP)
1665
    cpu_dump_rfi(env->nip, env->msr);
1666
#endif
1667
    /* No need to raise an exception here,
1668
     * as rfi is always the last insn of a TB
1669
     */
1670
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1671
}
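/* Note (added for clarity): the rfi family reloads NIP from the saved
 * address (with the two low bits cleared) and MSR from the saved MSR under
 * the given mask.  When the new context is 32-bit the values are truncated,
 * and keep_msrh preserves the upper MSR half (used by rfi on 64-bit CPUs).
 * A TB exit is requested because the translation context may have changed.
 */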
1672

    
1673
void helper_rfi (void)
1674
{
1675
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1676
           ~((target_ulong)0xFFFF0000), 1);
1677
}
1678

    
1679
#if defined(TARGET_PPC64)
1680
void helper_rfid (void)
1681
{
1682
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1683
           ~((target_ulong)0xFFFF0000), 0);
1684
}
1685

    
1686
void helper_hrfid (void)
1687
{
1688
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1689
           ~((target_ulong)0xFFFF0000), 0);
1690
}
1691
#endif
1692
#endif
1693

    
1694
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1695
{
1696
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1697
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1698
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1699
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1700
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1701
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1702
    }
1703
}
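/* Reminder (annotation only): the low five 'flags' bits mirror the PowerPC
 * TO field: 0x10 lt (signed), 0x08 gt (signed), 0x04 eq, 0x02 lt (unsigned),
 * 0x01 gt (unsigned).  A TO value of 0x1F therefore always traps, which is
 * how the unconditional 'trap' mnemonic is encoded.
 */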
1704

    
1705
#if defined(TARGET_PPC64)
1706
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1707
{
1708
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1709
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1710
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1711
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1712
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1713
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1714
}
1715
#endif
1716

    
1717
/*****************************************************************************/
1718
/* PowerPC 601 specific instructions (POWER bridge) */
1719

    
1720
target_ulong helper_clcs (uint32_t arg)
1721
{
1722
    switch (arg) {
1723
    case 0x0CUL:
1724
        /* Instruction cache line size */
1725
        return env->icache_line_size;
1726
        break;
1727
    case 0x0DUL:
1728
        /* Data cache line size */
1729
        return env->dcache_line_size;
1730
        break;
1731
    case 0x0EUL:
1732
        /* Minimum cache line size */
1733
        return (env->icache_line_size < env->dcache_line_size) ?
1734
                env->icache_line_size : env->dcache_line_size;
1735
        break;
1736
    case 0x0FUL:
1737
        /* Maximum cache line size */
1738
        return (env->icache_line_size > env->dcache_line_size) ?
1739
                env->icache_line_size : env->dcache_line_size;
1740
        break;
1741
    default:
1742
        /* Undefined */
1743
        return 0;
1744
        break;
1745
    }
1746
}
1747

    
1748
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1749
{
1750
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1751

    
1752
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1753
        (int32_t)arg2 == 0) {
1754
        env->spr[SPR_MQ] = 0;
1755
        return INT32_MIN;
1756
    } else {
1757
        env->spr[SPR_MQ] = tmp % arg2;
1758
        return  tmp / (int32_t)arg2;
1759
    }
1760
}
1761

    
1762
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1763
{
1764
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1765

    
1766
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1767
        (int32_t)arg2 == 0) {
1768
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
1769
        env->spr[SPR_MQ] = 0;
1770
        return INT32_MIN;
1771
    } else {
1772
        env->spr[SPR_MQ] = tmp % arg2;
1773
        tmp /= (int32_t)arg2;
1774
        if ((int32_t)tmp != tmp) {
1775
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
1776
        } else {
1777
            env->xer &= ~(1 << XER_OV);
1778
        }
1779
        return tmp;
1780
    }
1781
}
1782

    
1783
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1784
{
1785
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1786
        (int32_t)arg2 == 0) {
1787
        env->spr[SPR_MQ] = 0;
1788
        return INT32_MIN;
1789
    } else {
1790
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1791
        return (int32_t)arg1 / (int32_t)arg2;
1792
    }
1793
}
1794

    
1795
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1796
{
1797
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1798
        (int32_t)arg2 == 0) {
1799
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
1800
        env->spr[SPR_MQ] = 0;
1801
        return INT32_MIN;
1802
    } else {
1803
        env->xer &= ~(1 << XER_OV);
1804
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1805
        return (int32_t)arg1 / (int32_t)arg2;
1806
    }
1807
}
1808

    
1809
#if !defined (CONFIG_USER_ONLY)
1810
target_ulong helper_rac (target_ulong addr)
1811
{
1812
    mmu_ctx_t ctx;
1813
    int nb_BATs;
1814
    target_ulong ret = 0;
1815

    
1816
    /* We don't have to generate many instances of this instruction,
1817
     * as rac is supervisor only.
1818
     */
1819
    /* XXX: FIX THIS: Pretend we have no BAT */
1820
    nb_BATs = env->nb_BATs;
1821
    env->nb_BATs = 0;
1822
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1823
        ret = ctx.raddr;
1824
    env->nb_BATs = nb_BATs;
1825
    return ret;
1826
}
1827

    
1828
void helper_rfsvc (void)
1829
{
1830
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1831
}
1832
#endif
1833

    
1834
/*****************************************************************************/
1835
/* 602 specific instructions */
1836
/* mfrom is the most crazy instruction ever seen, imho ! */
1837
/* Real implementation uses a ROM table. Do the same */
1838
/* Extremely decomposed:
1839
 *                      -arg / 256
1840
 * return 256 * log10(10           + 1.0) + 0.5
1841
 */
1842
#if !defined (CONFIG_USER_ONLY)
1843
target_ulong helper_602_mfrom (target_ulong arg)
1844
{
1845
    if (likely(arg < 602)) {
1846
#include "mfrom_table.c"
1847
        return mfrom_ROM_table[arg];
1848
    } else {
1849
        return 0;
1850
    }
1851
}
1852
#endif
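/* For reference, a rough sketch of how a table like mfrom_ROM_table could be
 * regenerated offline from the formula quoted above (the generator itself is
 * hypothetical and not part of the sources; only the included table is used):
 *
 *     #include <math.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         for (int n = 0; n < 602; n++) {
 *             unsigned v = (unsigned)(256.0 * log10(pow(10.0, -n / 256.0) + 1.0) + 0.5);
 *             printf("%u,\n", v);
 *         }
 *         return 0;
 *     }
 */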
1853

    
1854
/*****************************************************************************/
1855
/* Embedded PowerPC specific helpers */
1856

    
1857
/* XXX: to be improved to check access rights when in user-mode */
1858
target_ulong helper_load_dcr (target_ulong dcrn)
1859
{
1860
    target_ulong val = 0;
1861

    
1862
    if (unlikely(env->dcr_env == NULL)) {
1863
        if (loglevel != 0) {
1864
            fprintf(logfile, "No DCR environment\n");
1865
        }
1866
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1867
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1868
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1869
        if (loglevel != 0) {
1870
            fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1871
        }
1872
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1873
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1874
    }
1875
    return val;
1876
}
1877

    
1878
void helper_store_dcr (target_ulong dcrn, target_ulong val)
1879
{
1880
    if (unlikely(env->dcr_env == NULL)) {
1881
        if (loglevel != 0) {
1882
            fprintf(logfile, "No DCR environment\n");
1883
        }
1884
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1885
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1886
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1887
        if (loglevel != 0) {
1888
            fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
1889
        }
1890
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1891
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1892
    }
1893
}
1894

    
1895
#if !defined(CONFIG_USER_ONLY)
1896
void helper_40x_rfci (void)
1897
{
1898
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1899
           ~((target_ulong)0xFFFF0000), 0);
1900
}
1901

    
1902
void helper_rfci (void)
1903
{
1904
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1905
           ~((target_ulong)0x3FFF0000), 0);
1906
}
1907

    
1908
void helper_rfdi (void)
1909
{
1910
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1911
           ~((target_ulong)0x3FFF0000), 0);
1912
}
1913

    
1914
void helper_rfmci (void)
1915
{
1916
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1917
           ~((target_ulong)0x3FFF0000), 0);
1918
}
1919
#endif
1920

    
1921
/* 440 specific */
1922
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1923
{
1924
    target_ulong mask;
1925
    int i;
1926

    
1927
    i = 1;
1928
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1929
        if ((high & mask) == 0) {
1930
            if (update_Rc) {
1931
                env->crf[0] = 0x4;
1932
            }
1933
            goto done;
1934
        }
1935
        i++;
1936
    }
1937
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1938
        if ((low & mask) == 0) {
1939
            if (update_Rc) {
1940
                env->crf[0] = 0x8;
1941
            }
1942
            goto done;
1943
        }
1944
        i++;
1945
    }
1946
    if (update_Rc) {
1947
        env->crf[0] = 0x2;
1948
    }
1949
 done:
1950
    env->xer = (env->xer & ~0x7F) | i;
1951
    if (update_Rc) {
1952
        env->crf[0] |= xer_so;
1953
    }
1954
    return i;
1955
}
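/* Worked example (annotation only): with high = 0x41424300 the loop above
 * finds the zero in the fourth byte scanned, so i == 4 is written to the low
 * seven bits of XER and returned, and CR0 becomes 0x4 (ORed with the XER
 * summary-overflow bit) when update_Rc is set.
 */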
1956

    
1957
/*****************************************************************************/
1958
/* Altivec extension helpers */
1959
#if defined(WORDS_BIGENDIAN)
1960
#define HI_IDX 0
1961
#define LO_IDX 1
1962
#else
1963
#define HI_IDX 1
1964
#define LO_IDX 0
1965
#endif
1966

    
1967
#if defined(WORDS_BIGENDIAN)
1968
#define VECTOR_FOR_INORDER_I(index, element)            \
1969
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
1970
#else
1971
#define VECTOR_FOR_INORDER_I(index, element)            \
1972
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1973
#endif
1974

    
1975
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
1976
{
1977
    int i, j = (sh & 0xf);
1978

    
1979
    VECTOR_FOR_INORDER_I (i, u8) {
1980
        r->u8[i] = j++;
1981
    }
1982
}
1983

    
1984
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
1985
{
1986
    int i, j = 0x10 - (sh & 0xf);
1987

    
1988
    VECTOR_FOR_INORDER_I (i, u8) {
1989
        r->u8[i] = j++;
1990
    }
1991
}
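/* Example (annotation only): lvsl with an effective-address alignment of 3
 * fills the permute control vector with the byte values 3,4,...,18, while
 * lvsr with the same alignment fills it with 13,14,...,28.
 */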
1992

    
1993
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1994
{
1995
    int i;
1996
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1997
        r->u32[i] = ~a->u32[i] < b->u32[i];
1998
    }
1999
}
2000

    
2001
#define VARITH_DO(name, op, element)        \
2002
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
2003
{                                                                       \
2004
    int i;                                                              \
2005
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
2006
        r->element[i] = a->element[i] op b->element[i];                 \
2007
    }                                                                   \
2008
}
2009
#define VARITH(suffix, element)                  \
2010
  VARITH_DO(add##suffix, +, element)             \
2011
  VARITH_DO(sub##suffix, -, element)
2012
VARITH(ubm, u8)
2013
VARITH(uhm, u16)
2014
VARITH(uwm, u32)
2015
#undef VARITH_DO
2016
#undef VARITH
2017

    
2018
#define VAVG_DO(name, element, etype)                                   \
2019
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2020
    {                                                                   \
2021
        int i;                                                          \
2022
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2023
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
2024
            r->element[i] = x >> 1;                                     \
2025
        }                                                               \
2026
    }
2027

    
2028
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2029
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
2030
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2031
VAVG(b, s8, int16_t, u8, uint16_t)
2032
VAVG(h, s16, int32_t, u16, uint32_t)
2033
VAVG(w, s32, int64_t, u32, uint64_t)
2034
#undef VAVG_DO
2035
#undef VAVG
2036

    
2037
#define VMINMAX_DO(name, compare, element)                              \
2038
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2039
    {                                                                   \
2040
        int i;                                                          \
2041
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2042
            if (a->element[i] compare b->element[i]) {                  \
2043
                r->element[i] = b->element[i];                          \
2044
            } else {                                                    \
2045
                r->element[i] = a->element[i];                          \
2046
            }                                                           \
2047
        }                                                               \
2048
    }
2049
#define VMINMAX(suffix, element)                \
2050
  VMINMAX_DO(min##suffix, >, element)           \
2051
  VMINMAX_DO(max##suffix, <, element)
2052
VMINMAX(sb, s8)
2053
VMINMAX(sh, s16)
2054
VMINMAX(sw, s32)
2055
VMINMAX(ub, u8)
2056
VMINMAX(uh, u16)
2057
VMINMAX(uw, u32)
2058
#undef VMINMAX_DO
2059
#undef VMINMAX
2060

    
2061
#define VMRG_DO(name, element, highp)                                   \
2062
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2063
    {                                                                   \
2064
        ppc_avr_t result;                                               \
2065
        int i;                                                          \
2066
        size_t n_elems = ARRAY_SIZE(r->element);                        \
2067
        for (i = 0; i < n_elems/2; i++) {                               \
2068
            if (highp) {                                                \
2069
                result.element[i*2+HI_IDX] = a->element[i];             \
2070
                result.element[i*2+LO_IDX] = b->element[i];             \
2071
            } else {                                                    \
2072
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2073
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2074
            }                                                           \
2075
        }                                                               \
2076
        *r = result;                                                    \
2077
    }
2078
#if defined(WORDS_BIGENDIAN)
2079
#define MRGHI 0
2080
#define MRGLO 1
2081
#else
2082
#define MRGHI 1
2083
#define MRGLO 0
2084
#endif
2085
#define VMRG(suffix, element)                   \
2086
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
2087
  VMRG_DO(mrgh##suffix, element, MRGLO)
2088
VMRG(b, u8)
2089
VMRG(h, u16)
2090
VMRG(w, u32)
2091
#undef VMRG_DO
2092
#undef VMRG
2093
#undef MRGHI
2094
#undef MRGLO
2095

    
2096
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2097
{
2098
    int32_t prod[16];
2099
    int i;
2100

    
2101
    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2102
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
2103
    }
2104

    
2105
    VECTOR_FOR_INORDER_I(i, s32) {
2106
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2107
    }
2108
}
2109

    
2110
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2111
{
2112
    uint16_t prod[16];
2113
    int i;
2114

    
2115
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2116
        prod[i] = a->u8[i] * b->u8[i];
2117
    }
2118

    
2119
    VECTOR_FOR_INORDER_I(i, u32) {
2120
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2121
    }
2122
}
2123

    
2124
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
2125
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2126
    {                                                                   \
2127
        int i;                                                          \
2128
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
2129
            if (evenp) {                                                \
2130
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2131
            } else {                                                    \
2132
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2133
            }                                                           \
2134
        }                                                               \
2135
    }
2136
#define VMUL(suffix, mul_element, prod_element) \
2137
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2138
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2139
VMUL(sb, s8, s16)
2140
VMUL(sh, s16, s32)
2141
VMUL(ub, u8, u16)
2142
VMUL(uh, u16, u32)
2143
#undef VMUL_DO
2144
#undef VMUL
2145

    
2146
#define VROTATE(suffix, element)                                        \
2147
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2148
    {                                                                   \
2149
        int i;                                                          \
2150
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2151
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2152
            unsigned int shift = b->element[i] & mask;                  \
2153
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2154
        }                                                               \
2155
    }
2156
VROTATE(b, u8)
2157
VROTATE(h, u16)
2158
VROTATE(w, u32)
2159
#undef VROTATE
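/* Note (annotation only): the mask expression used in VROTATE above and in
 * VSL/VSR below, (1 << (3 + (sizeof(element) >> 1))) - 1, evaluates to 7 for
 * byte elements, 15 for halfwords and 31 for words, i.e. exactly the number
 * of significant shift bits for each element size.
 */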
2160

    
2161
#define VSL(suffix, element)                                            \
2162
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2163
    {                                                                   \
2164
        int i;                                                          \
2165
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2166
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2167
            unsigned int shift = b->element[i] & mask;                  \
2168
            r->element[i] = a->element[i] << shift;                     \
2169
        }                                                               \
2170
    }
2171
VSL(b, u8)
2172
VSL(h, u16)
2173
VSL(w, u32)
2174
#undef VSL
2175

    
2176
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2177
{
2178
    int sh = shift & 0xf;
2179
    int i;
2180
    ppc_avr_t result;
2181

    
2182
#if defined(WORDS_BIGENDIAN)
2183
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2184
        int index = sh + i;
2185
        if (index > 0xf) {
2186
            result.u8[i] = b->u8[index-0x10];
2187
        } else {
2188
            result.u8[i] = a->u8[index];
2189
        }
2190
    }
2191
#else
2192
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2193
        int index = (16 - sh) + i;
2194
        if (index > 0xf) {
2195
            result.u8[i] = a->u8[index-0x10];
2196
        } else {
2197
            result.u8[i] = b->u8[index];
2198
        }
2199
    }
2200
#endif
2201
    *r = result;
2202
}
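/* Worked example (annotation only): with shift == 4 the code above yields,
 * in PowerPC element order, bytes 4..15 of the first operand followed by
 * bytes 0..3 of the second, which is the vsldoi "shift left double by octet
 * immediate" result.
 */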
2203

    
2204
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2205
{
2206
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2207

    
2208
#if defined (WORDS_BIGENDIAN)
2209
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2210
  memset (&r->u8[16-sh], 0, sh);
2211
#else
2212
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2213
  memset (&r->u8[0], 0, sh);
2214
#endif
2215
}
2216

    
2217
/* Experimental testing shows that hardware masks the immediate.  */
2218
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2219
#if defined(WORDS_BIGENDIAN)
2220
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2221
#else
2222
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2223
#endif
2224
#define VSPLT(suffix, element)                                          \
2225
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2226
    {                                                                   \
2227
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
2228
        int i;                                                          \
2229
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2230
            r->element[i] = s;                                          \
2231
        }                                                               \
2232
    }
2233
VSPLT(b, u8)
2234
VSPLT(h, u16)
2235
VSPLT(w, u32)
2236
#undef VSPLT
2237
#undef SPLAT_ELEMENT
2238
#undef _SPLAT_MASKED
2239

    
2240
#define VSR(suffix, element)                                            \
2241
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2242
    {                                                                   \
2243
        int i;                                                          \
2244
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2245
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2246
            unsigned int shift = b->element[i] & mask;                  \
2247
            r->element[i] = a->element[i] >> shift;                     \
2248
        }                                                               \
2249
    }
2250
VSR(ab, s8)
2251
VSR(ah, s16)
2252
VSR(aw, s32)
2253
VSR(b, u8)
2254
VSR(h, u16)
2255
VSR(w, u32)
2256
#undef VSR
2257

    
2258
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2259
{
2260
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2261

    
2262
#if defined (WORDS_BIGENDIAN)
2263
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2264
  memset (&r->u8[0], 0, sh);
2265
#else
2266
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2267
  memset (&r->u8[16-sh], 0, sh);
2268
#endif
2269
}
2270

    
2271
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2272
{
2273
    int i;
2274
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2275
        r->u32[i] = a->u32[i] >= b->u32[i];
2276
    }
2277
}
2278

    
2279
#if defined(WORDS_BIGENDIAN)
2280
#define UPKHI 1
2281
#define UPKLO 0
2282
#else
2283
#define UPKHI 0
2284
#define UPKLO 1
2285
#endif
2286
#define VUPKPX(suffix, hi)                                      \
2287
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
2288
    {                                                           \
2289
        int i;                                                  \
2290
        ppc_avr_t result;                                       \
2291
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
2292
            uint16_t e = b->u16[hi ? i : i+4];                  \
2293
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
2294
            uint8_t r = (e >> 10) & 0x1f;                       \
2295
            uint8_t g = (e >> 5) & 0x1f;                        \
2296
            uint8_t b = e & 0x1f;                               \
2297
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
2298
        }                                                               \
2299
        *r = result;                                                    \
2300
    }
2301
VUPKPX(lpx, UPKLO)
2302
VUPKPX(hpx, UPKHI)
2303
#undef VUPKPX
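/* Worked example (annotation only): vupkhpx/vupklpx expand 1/5/5/5 pixels
 * into 8-bit channels without rescaling the 5-bit values, so a source
 * halfword of 0xFC00 (a=1, r=0x1F, g=0, b=0) unpacks to the word 0xFF1F0000.
 */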
2304

    
2305
#define VUPK(suffix, unpacked, packee, hi)                              \
2306
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
2307
    {                                                                   \
2308
        int i;                                                          \
2309
        ppc_avr_t result;                                               \
2310
        if (hi) {                                                       \
2311
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
2312
                result.unpacked[i] = b->packee[i];                      \
2313
            }                                                           \
2314
        } else {                                                        \
2315
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
2316
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
2317
            }                                                           \
2318
        }                                                               \
2319
        *r = result;                                                    \
2320
    }
2321
VUPK(hsb, s16, s8, UPKHI)
2322
VUPK(hsh, s32, s16, UPKHI)
2323
VUPK(lsb, s16, s8, UPKLO)
2324
VUPK(lsh, s32, s16, UPKLO)
2325
#undef VUPK
2326
#undef UPKHI
2327
#undef UPKLO
2328

    
2329
#undef VECTOR_FOR_INORDER_I
2330
#undef HI_IDX
2331
#undef LO_IDX
2332

    
2333
/*****************************************************************************/
2334
/* SPE extension helpers */
2335
/* Use a table to make this quicker */
2336
static uint8_t hbrev[16] = {
2337
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
2338
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
2339
};
2340

    
2341
static always_inline uint8_t byte_reverse (uint8_t val)
2342
{
2343
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
2344
}
2345

    
2346
static always_inline uint32_t word_reverse (uint32_t val)
2347
{
2348
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2349
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
2350
}
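/* Example (annotation only): byte_reverse(0x2D) == 0xB4 (00101101b reversed
 * is 10110100b), and word_reverse() applies the same bit reversal across a
 * full 32-bit word by swapping the reversed bytes end for end.
 */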
2351

    
2352
#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
2353
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2354
{
2355
    uint32_t a, b, d, mask;
2356

    
2357
    mask = UINT32_MAX >> (32 - MASKBITS);
2358
    a = arg1 & mask;
2359
    b = arg2 & mask;
2360
    d = word_reverse(1 + word_reverse(a | ~b));
2361
    return (arg1 & ~mask) | (d & b);
2362
}
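/* Worked example (annotation only, using the MASKBITS == 16 placeholder):
 * brinc(0x0000, 0xFFFF) returns 0x8000, i.e. the bit-reversed increment of 0
 * within a 16-bit mask (reverse(0) + 1, reversed back, is the top mask bit).
 */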
2363

    
2364
uint32_t helper_cntlsw32 (uint32_t val)
2365
{
2366
    if (val & 0x80000000)
2367
        return clz32(~val);
2368
    else
2369
        return clz32(val);
2370
}
2371

    
2372
uint32_t helper_cntlzw32 (uint32_t val)
2373
{
2374
    return clz32(val);
2375
}
2376

    
2377
/* Single-precision floating-point conversions */
2378
static always_inline uint32_t efscfsi (uint32_t val)
2379
{
2380
    CPU_FloatU u;
2381

    
2382
    u.f = int32_to_float32(val, &env->spe_status);
2383

    
2384
    return u.l;
2385
}
2386

    
2387
static always_inline uint32_t efscfui (uint32_t val)
2388
{
2389
    CPU_FloatU u;
2390

    
2391
    u.f = uint32_to_float32(val, &env->spe_status);
2392

    
2393
    return u.l;
2394
}
2395

    
2396
static always_inline int32_t efsctsi (uint32_t val)
2397
{
2398
    CPU_FloatU u;
2399

    
2400
    u.l = val;
2401
    /* NaN are not treated the same way IEEE 754 does */
2402
    if (unlikely(float32_is_nan(u.f)))
2403
        return 0;
2404

    
2405
    return float32_to_int32(u.f, &env->spe_status);
2406
}
2407

    
2408
static always_inline uint32_t efsctui (uint32_t val)
2409
{
2410
    CPU_FloatU u;
2411

    
2412
    u.l = val;
2413
    /* NaN are not treated the same way IEEE 754 does */
2414
    if (unlikely(float32_is_nan(u.f)))
2415
        return 0;
2416

    
2417
    return float32_to_uint32(u.f, &env->spe_status);
2418
}
2419

    
2420
static always_inline uint32_t efsctsiz (uint32_t val)
2421
{
2422
    CPU_FloatU u;
2423

    
2424
    u.l = val;
2425
    /* NaN are not treated the same way IEEE 754 does */
2426
    if (unlikely(float32_is_nan(u.f)))
2427
        return 0;
2428

    
2429
    return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2430
}
2431

    
2432
static always_inline uint32_t efsctuiz (uint32_t val)
2433
{
2434
    CPU_FloatU u;
2435

    
2436
    u.l = val;
2437
    /* NaN are not treated the same way IEEE 754 does */
2438
    if (unlikely(float32_is_nan(u.f)))
2439
        return 0;
2440

    
2441
    return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2442
}
2443

    
2444
static always_inline uint32_t efscfsf (uint32_t val)
2445
{
2446
    CPU_FloatU u;
2447
    float32 tmp;
2448

    
2449
    u.f = int32_to_float32(val, &env->spe_status);
2450
    tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2451
    u.f = float32_div(u.f, tmp, &env->spe_status);
2452

    
2453
    return u.l;
2454
}
2455

    
2456
static always_inline uint32_t efscfuf (uint32_t val)
2457
{
2458
    CPU_FloatU u;
2459
    float32 tmp;
2460

    
2461
    u.f = uint32_to_float32(val, &env->spe_status);
2462
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2463
    u.f = float32_div(u.f, tmp, &env->spe_status);
2464

    
2465
    return u.l;
2466
}
2467

    
2468
static always_inline uint32_t efsctsf (uint32_t val)
2469
{
2470
    CPU_FloatU u;
2471
    float32 tmp;
2472

    
2473
    u.l = val;
2474
    /* NaN are not treated the same way IEEE 754 does */
2475
    if (unlikely(float32_is_nan(u.f)))
2476
        return 0;
2477
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2478
    u.f = float32_mul(u.f, tmp, &env->spe_status);
2479

    
2480
    return float32_to_int32(u.f, &env->spe_status);
2481
}
2482

    
2483
static always_inline uint32_t efsctuf (uint32_t val)
2484
{
2485
    CPU_FloatU u;
2486
    float32 tmp;
2487

    
2488
    u.l = val;
2489
    /* NaN are not treated the same way IEEE 754 does */
2490
    if (unlikely(float32_is_nan(u.f)))
2491
        return 0;
2492
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2493
    u.f = float32_mul(u.f, tmp, &env->spe_status);
2494

    
2495
    return float32_to_uint32(u.f, &env->spe_status);
2496
}
2497

    
2498
#define HELPER_SPE_SINGLE_CONV(name)                                          \
2499
uint32_t helper_e##name (uint32_t val)                                        \
2500
{                                                                             \
2501
    return e##name(val);                                                      \
2502
}
2503
/* efscfsi */
2504
HELPER_SPE_SINGLE_CONV(fscfsi);
2505
/* efscfui */
2506
HELPER_SPE_SINGLE_CONV(fscfui);
2507
/* efscfuf */
2508
HELPER_SPE_SINGLE_CONV(fscfuf);
2509
/* efscfsf */
2510
HELPER_SPE_SINGLE_CONV(fscfsf);
2511
/* efsctsi */
2512
HELPER_SPE_SINGLE_CONV(fsctsi);
2513
/* efsctui */
2514
HELPER_SPE_SINGLE_CONV(fsctui);
2515
/* efsctsiz */
2516
HELPER_SPE_SINGLE_CONV(fsctsiz);
2517
/* efsctuiz */
2518
HELPER_SPE_SINGLE_CONV(fsctuiz);
2519
/* efsctsf */
2520
HELPER_SPE_SINGLE_CONV(fsctsf);
2521
/* efsctuf */
2522
HELPER_SPE_SINGLE_CONV(fsctuf);
2523

    
2524
#define HELPER_SPE_VECTOR_CONV(name)                                          \
2525
uint64_t helper_ev##name (uint64_t val)                                       \
2526
{                                                                             \
2527
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
2528
            (uint64_t)e##name(val);                                           \
2529
}
2530
/* evfscfsi */
2531
HELPER_SPE_VECTOR_CONV(fscfsi);
2532
/* evfscfui */
2533
HELPER_SPE_VECTOR_CONV(fscfui);
2534
/* evfscfuf */
2535
HELPER_SPE_VECTOR_CONV(fscfuf);
2536
/* evfscfsf */
2537
HELPER_SPE_VECTOR_CONV(fscfsf);
2538
/* evfsctsi */
2539
HELPER_SPE_VECTOR_CONV(fsctsi);
2540
/* evfsctui */
2541
HELPER_SPE_VECTOR_CONV(fsctui);
2542
/* evfsctsiz */
2543
HELPER_SPE_VECTOR_CONV(fsctsiz);
2544
/* evfsctuiz */
2545
HELPER_SPE_VECTOR_CONV(fsctuiz);
2546
/* evfsctsf */
2547
HELPER_SPE_VECTOR_CONV(fsctsf);
2548
/* evfsctuf */
2549
HELPER_SPE_VECTOR_CONV(fsctuf);
2550

    
2551
/* Single-precision floating-point arithmetic */
2552
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
2553
{
2554
    CPU_FloatU u1, u2;
2555
    u1.l = op1;
2556
    u2.l = op2;
2557
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
2558
    return u1.l;
2559
}
2560

    
2561
static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
2562
{
2563
    CPU_FloatU u1, u2;
2564
    u1.l = op1;
2565
    u2.l = op2;
2566
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
2567
    return u1.l;
2568
}
2569

    
2570
static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
2571
{
2572
    CPU_FloatU u1, u2;
2573
    u1.l = op1;
2574
    u2.l = op2;
2575
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
2576
    return u1.l;
2577
}
2578

    
2579
static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
2580
{
2581
    CPU_FloatU u1, u2;
2582
    u1.l = op1;
2583
    u2.l = op2;
2584
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
2585
    return u1.l;
2586
}
2587

    
2588
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
2589
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
2590
{                                                                             \
2591
    return e##name(op1, op2);                                                 \
2592
}
2593
/* efsadd */
2594
HELPER_SPE_SINGLE_ARITH(fsadd);
2595
/* efssub */
2596
HELPER_SPE_SINGLE_ARITH(fssub);
2597
/* efsmul */
2598
HELPER_SPE_SINGLE_ARITH(fsmul);
2599
/* efsdiv */
2600
HELPER_SPE_SINGLE_ARITH(fsdiv);
2601

    
2602
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
2603
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
2604
{                                                                             \
2605
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
2606
            (uint64_t)e##name(op1, op2);                                      \
2607
}
2608
/* evfsadd */
2609
HELPER_SPE_VECTOR_ARITH(fsadd);
2610
/* evfssub */
2611
HELPER_SPE_VECTOR_ARITH(fssub);
2612
/* evfsmul */
2613
HELPER_SPE_VECTOR_ARITH(fsmul);
2614
/* evfsdiv */
2615
HELPER_SPE_VECTOR_ARITH(fsdiv);
2616

    
2617
/* Single-precision floating-point comparisons */
2618
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2619
{
2620
    CPU_FloatU u1, u2;
2621
    u1.l = op1;
2622
    u2.l = op2;
2623
    return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2624
}
2625

    
2626
static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2627
{
2628
    CPU_FloatU u1, u2;
2629
    u1.l = op1;
2630
    u2.l = op2;
2631
    return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2632
}
2633

    
2634
static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2635
{
2636
    CPU_FloatU u1, u2;
2637
    u1.l = op1;
2638
    u2.l = op2;
2639
    return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2640
}
2641

    
2642
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
2643
{
2644
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2645
    return efststlt(op1, op2);
2646
}
2647

    
2648
static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
2649
{
2650
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2651
    return efststgt(op1, op2);
2652
}
2653

    
2654
static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
2655
{
2656
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2657
    return efststeq(op1, op2);
2658
}
2659

    
2660
#define HELPER_SINGLE_SPE_CMP(name)                                           \
2661
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
2662
{                                                                             \
2663
    return e##name(op1, op2) << 2;                                            \
2664
}
2665
/* efststlt */
2666
HELPER_SINGLE_SPE_CMP(fststlt);
2667
/* efststgt */
2668
HELPER_SINGLE_SPE_CMP(fststgt);
2669
/* efststeq */
2670
HELPER_SINGLE_SPE_CMP(fststeq);
2671
/* efscmplt */
2672
HELPER_SINGLE_SPE_CMP(fscmplt);
2673
/* efscmpgt */
2674
HELPER_SINGLE_SPE_CMP(fscmpgt);
2675
/* efscmpeq */
2676
HELPER_SINGLE_SPE_CMP(fscmpeq);
2677

    
2678
static always_inline uint32_t evcmp_merge (int t0, int t1)
2679
{
2680
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
2681
}
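/* Note (annotation only): the merged CR field layout is bit3 = high result,
 * bit2 = low result, bit1 = either, bit0 = both, so for example
 * evcmp_merge(1, 0) == 0xA and evcmp_merge(1, 1) == 0xF.
 */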
2682

    
2683
#define HELPER_VECTOR_SPE_CMP(name)                                           \
2684
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
2685
{                                                                             \
2686
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
2687
}
2688
/* evfststlt */
2689
HELPER_VECTOR_SPE_CMP(fststlt);
2690
/* evfststgt */
2691
HELPER_VECTOR_SPE_CMP(fststgt);
2692
/* evfststeq */
2693
HELPER_VECTOR_SPE_CMP(fststeq);
2694
/* evfscmplt */
2695
HELPER_VECTOR_SPE_CMP(fscmplt);
2696
/* evfscmpgt */
2697
HELPER_VECTOR_SPE_CMP(fscmpgt);
2698
/* evfscmpeq */
2699
HELPER_VECTOR_SPE_CMP(fscmpeq);
2700

    
2701
/* Double-precision floating-point conversion */
2702
uint64_t helper_efdcfsi (uint32_t val)
2703
{
2704
    CPU_DoubleU u;
2705

    
2706
    u.d = int32_to_float64(val, &env->spe_status);
2707

    
2708
    return u.ll;
2709
}
2710

    
2711
uint64_t helper_efdcfsid (uint64_t val)
2712
{
2713
    CPU_DoubleU u;
2714

    
2715
    u.d = int64_to_float64(val, &env->spe_status);
2716

    
2717
    return u.ll;
2718
}
2719

    
2720
uint64_t helper_efdcfui (uint32_t val)
2721
{
2722
    CPU_DoubleU u;
2723

    
2724
    u.d = uint32_to_float64(val, &env->spe_status);
2725

    
2726
    return u.ll;
2727
}
2728

    
2729
uint64_t helper_efdcfuid (uint64_t val)
2730
{
2731
    CPU_DoubleU u;
2732

    
2733
    u.d = uint64_to_float64(val, &env->spe_status);
2734

    
2735
    return u.ll;
2736
}
2737

    
2738
uint32_t helper_efdctsi (uint64_t val)
2739
{
2740
    CPU_DoubleU u;
2741

    
2742
    u.ll = val;
2743
    /* NaN are not treated the same way IEEE 754 does */
2744
    if (unlikely(float64_is_nan(u.d)))
2745
        return 0;
2746

    
2747
    return float64_to_int32(u.d, &env->spe_status);
2748
}
2749

    
2750
uint32_t helper_efdctui (uint64_t val)
2751
{
2752
    CPU_DoubleU u;
2753

    
2754
    u.ll = val;
2755
    /* NaN are not treated the same way IEEE 754 does */
2756
    if (unlikely(float64_is_nan(u.d)))
2757
        return 0;
2758

    
2759
    return float64_to_uint32(u.d, &env->spe_status);
2760
}
2761

    
2762
uint32_t helper_efdctsiz (uint64_t val)
2763
{
2764
    CPU_DoubleU u;
2765

    
2766
    u.ll = val;
2767
    /* NaN are not treated the same way IEEE 754 does */
2768
    if (unlikely(float64_is_nan(u.d)))
2769
        return 0;
2770

    
2771
    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2772
}
2773

    
2774
uint64_t helper_efdctsidz (uint64_t val)
2775
{
2776
    CPU_DoubleU u;
2777

    
2778
    u.ll = val;
2779
    /* NaN are not treated the same way IEEE 754 does */
2780
    if (unlikely(float64_is_nan(u.d)))
2781
        return 0;
2782

    
2783
    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2784
}
2785

    
2786
uint32_t helper_efdctuiz (uint64_t val)
2787
{
2788
    CPU_DoubleU u;
2789

    
2790
    u.ll = val;
2791
    /* NaN are not treated the same way IEEE 754 does */
2792
    if (unlikely(float64_is_nan(u.d)))
2793
        return 0;
2794

    
2795
    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2796
}
2797

    
2798
uint64_t helper_efdctuidz (uint64_t val)
2799
{
2800
    CPU_DoubleU u;
2801

    
2802
    u.ll = val;
2803
    /* NaN are not treated the same way IEEE 754 does */
2804
    if (unlikely(float64_is_nan(u.d)))
2805
        return 0;
2806

    
2807
    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2808
}
2809

    
2810
uint64_t helper_efdcfsf (uint32_t val)
2811
{
2812
    CPU_DoubleU u;
2813
    float64 tmp;
2814

    
2815
    u.d = int32_to_float64(val, &env->spe_status);
2816
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2817
    u.d = float64_div(u.d, tmp, &env->spe_status);
2818

    
2819
    return u.ll;
2820
}
2821

    
2822
uint64_t helper_efdcfuf (uint32_t val)
2823
{
2824
    CPU_DoubleU u;
2825
    float64 tmp;
2826

    
2827
    u.d = uint32_to_float64(val, &env->spe_status);
2828
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2829
    u.d = float64_div(u.d, tmp, &env->spe_status);
2830

    
2831
    return u.ll;
2832
}
2833

    
2834
uint32_t helper_efdctsf (uint64_t val)
2835
{
2836
    CPU_DoubleU u;
2837
    float64 tmp;
2838

    
2839
    u.ll = val;
2840
    /* NaN are not treated the same way IEEE 754 does */
2841
    if (unlikely(float64_is_nan(u.d)))
2842
        return 0;
2843
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2844
    u.d = float64_mul(u.d, tmp, &env->spe_status);
2845

    
2846
    return float64_to_int32(u.d, &env->spe_status);
2847
}
2848

    
2849
uint32_t helper_efdctuf (uint64_t val)
2850
{
2851
    CPU_DoubleU u;
2852
    float64 tmp;
2853

    
2854
    u.ll = val;
2855
    /* NaN are not treated the same way IEEE 754 does */
2856
    if (unlikely(float64_is_nan(u.d)))
2857
        return 0;
2858
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2859
    u.d = float64_mul(u.d, tmp, &env->spe_status);
2860

    
2861
    return float64_to_uint32(u.d, &env->spe_status);
2862
}
2863

    
2864
uint32_t helper_efscfd (uint64_t val)
2865
{
2866
    CPU_DoubleU u1;
2867
    CPU_FloatU u2;
2868

    
2869
    u1.ll = val;
2870
    u2.f = float64_to_float32(u1.d, &env->spe_status);
2871

    
2872
    return u2.l;
2873
}
2874

    
2875
uint64_t helper_efdcfs (uint32_t val)
2876
{
2877
    CPU_DoubleU u2;
2878
    CPU_FloatU u1;
2879

    
2880
    u1.l = val;
2881
    u2.d = float32_to_float64(u1.f, &env->spe_status);
2882

    
2883
    return u2.ll;
2884
}
2885

    
2886
/* Double precision fixed-point arithmetic */
2887
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
2888
{
2889
    CPU_DoubleU u1, u2;
2890
    u1.ll = op1;
2891
    u2.ll = op2;
2892
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
2893
    return u1.ll;
2894
}
2895

    
2896
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
2897
{
2898
    CPU_DoubleU u1, u2;
2899
    u1.ll = op1;
2900
    u2.ll = op2;
2901
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
2902
    return u1.ll;
2903
}
2904

    
2905
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
2906
{
2907
    CPU_DoubleU u1, u2;
2908
    u1.ll = op1;
2909
    u2.ll = op2;
2910
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
2911
    return u1.ll;
2912
}
2913

    
2914
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
2915
{
2916
    CPU_DoubleU u1, u2;
2917
    u1.ll = op1;
2918
    u2.ll = op2;
2919
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
2920
    return u1.ll;
2921
}
2922

    
2923
/* Double precision floating point helpers */
2924
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
2925
{
2926
    CPU_DoubleU u1, u2;
2927
    u1.ll = op1;
2928
    u2.ll = op2;
2929
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2930
}
2931

    
2932
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
2933
{
2934
    CPU_DoubleU u1, u2;
2935
    u1.ll = op1;
2936
    u2.ll = op2;
2937
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
2938
}
2939

    
2940
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
2941
{
2942
    CPU_DoubleU u1, u2;
2943
    u1.ll = op1;
2944
    u2.ll = op2;
2945
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2946
}
2947

    
2948
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
2949
{
2950
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2951
    return helper_efdtstlt(op1, op2);
2952
}
2953

    
2954
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
2955
{
2956
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2957
    return helper_efdtstgt(op1, op2);
2958
}
2959

    
2960
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
2961
{
2962
    /* XXX: TODO: test special values (NaN, infinites, ...) */
2963
    return helper_efdtsteq(op1, op2);
2964
}
2965

    
2966
/*****************************************************************************/
2967
/* Softmmu support */
2968
#if !defined (CONFIG_USER_ONLY)
2969

    
2970
#define MMUSUFFIX _mmu
2971

    
2972
#define SHIFT 0
2973
#include "softmmu_template.h"
2974

    
2975
#define SHIFT 1
2976
#include "softmmu_template.h"
2977

    
2978
#define SHIFT 2
2979
#include "softmmu_template.h"
2980

    
2981
#define SHIFT 3
2982
#include "softmmu_template.h"
2983

    
2984
/* try to fill the TLB and return an exception if error. If retaddr is
2985
   NULL, it means that the function was called in C code (i.e. not
2986
   from generated code or from helper.c) */
2987
/* XXX: fix it to restore all registers */
2988
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2989
{
2990
    TranslationBlock *tb;
2991
    CPUState *saved_env;
2992
    unsigned long pc;
2993
    int ret;
2994

    
2995
    /* XXX: hack to restore env in all cases, even if not called from
2996
       generated code */
2997
    saved_env = env;
2998
    env = cpu_single_env;
2999
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3000
    if (unlikely(ret != 0)) {
3001
        if (likely(retaddr)) {
3002
            /* now we have a real cpu fault */
3003
            pc = (unsigned long)retaddr;
3004
            tb = tb_find_pc(pc);
3005
            if (likely(tb)) {
3006
                /* the PC is inside the translated code. It means that we have
3007
                   a virtual CPU fault */
3008
                cpu_restore_state(tb, env, pc, NULL);
3009
            }
3010
        }
3011
        helper_raise_exception_err(env->exception_index, env->error_code);
3012
    }
3013
    env = saved_env;
3014
}
3015

    
3016
/* Segment registers load and store */
3017
target_ulong helper_load_sr (target_ulong sr_num)
3018
{
3019
    return env->sr[sr_num];
3020
}
3021

    
3022
void helper_store_sr (target_ulong sr_num, target_ulong val)
3023
{
3024
    ppc_store_sr(env, sr_num, val);
3025
}
3026

    
3027
/* SLB management */
3028
#if defined(TARGET_PPC64)
3029
target_ulong helper_load_slb (target_ulong slb_nr)
3030
{
3031
    return ppc_load_slb(env, slb_nr);
3032
}
3033

    
3034
void helper_store_slb (target_ulong slb_nr, target_ulong rs)
3035
{
3036
    ppc_store_slb(env, slb_nr, rs);
3037
}
3038

    
3039
void helper_slbia (void)
3040
{
3041
    ppc_slb_invalidate_all(env);
3042
}
3043

    
3044
void helper_slbie (target_ulong addr)
3045
{
3046
    ppc_slb_invalidate_one(env, addr);
3047
}
3048

    
3049
#endif /* defined(TARGET_PPC64) */
3050

    
3051
/* TLB management */
3052
void helper_tlbia (void)
3053
{
3054
    ppc_tlb_invalidate_all(env);
3055
}
3056

    
3057
void helper_tlbie (target_ulong addr)
3058
{
3059
    ppc_tlb_invalidate_one(env, addr);
3060
}
3061

    
3062
/* Software driven TLBs management */
3063
/* PowerPC 602/603 software TLB load instructions helpers */
3064
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3065
{
3066
    target_ulong RPN, CMP, EPN;
3067
    int way;
3068

    
3069
    RPN = env->spr[SPR_RPA];
3070
    if (is_code) {
3071
        CMP = env->spr[SPR_ICMP];
3072
        EPN = env->spr[SPR_IMISS];
3073
    } else {
3074
        CMP = env->spr[SPR_DCMP];
3075
        EPN = env->spr[SPR_DMISS];
3076
    }
3077
    way = (env->spr[SPR_SRR1] >> 17) & 1;
3078
#if defined (DEBUG_SOFTWARE_TLB)
3079
    if (loglevel != 0) {
3080
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3081
                " PTE1 " ADDRX " way %d\n",
3082
                __func__, new_EPN, EPN, CMP, RPN, way);
3083
    }
3084
#endif
3085
    /* Store this TLB */
3086
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3087
                     way, is_code, CMP, RPN);
3088
}
3089

    
3090
void helper_6xx_tlbd (target_ulong EPN)
3091
{
3092
    do_6xx_tlb(EPN, 0);
3093
}
3094

    
3095
void helper_6xx_tlbi (target_ulong EPN)
3096
{
3097
    do_6xx_tlb(EPN, 1);
3098
}
3099

    
3100
/* PowerPC 74xx software TLB load instructions helpers */
3101
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3102
{
3103
    target_ulong RPN, CMP, EPN;
3104
    int way;
3105

    
3106
    RPN = env->spr[SPR_PTELO];
3107
    CMP = env->spr[SPR_PTEHI];
3108
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
3109
    way = env->spr[SPR_TLBMISS] & 0x3;
3110
#if defined (DEBUG_SOFTWARE_TLB)
3111
    if (loglevel != 0) {
3112
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3113
                " PTE1 " ADDRX " way %d\n",
3114
                __func__, new_EPN, EPN, CMP, RPN, way);
3115
    }
3116
#endif
3117
    /* Store this TLB */
3118
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3119
                     way, is_code, CMP, RPN);
3120
}
3121

    
3122
void helper_74xx_tlbd (target_ulong EPN)
3123
{
3124
    do_74xx_tlb(EPN, 0);
3125
}
3126

    
3127
void helper_74xx_tlbi (target_ulong EPN)
3128
{
3129
    do_74xx_tlb(EPN, 1);
3130
}
3131

    
3132
static always_inline target_ulong booke_tlb_to_page_size (int size)
3133
{
3134
    return 1024 << (2 * size);
3135
}
3136

    
3137
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3138
{
3139
    int size;
3140

    
3141
    switch (page_size) {
3142
    case 0x00000400UL:
3143
        size = 0x0;
3144
        break;
3145
    case 0x00001000UL:
3146
        size = 0x1;
3147
        break;
3148
    case 0x00004000UL:
3149
        size = 0x2;
3150
        break;
3151
    case 0x00010000UL:
3152
        size = 0x3;
3153
        break;
3154
    case 0x00040000UL:
3155
        size = 0x4;
3156
        break;
3157
    case 0x00100000UL:
3158
        size = 0x5;
3159
        break;
3160
    case 0x00400000UL:
3161
        size = 0x6;
3162
        break;
3163
    case 0x01000000UL:
3164
        size = 0x7;
3165
        break;
3166
    case 0x04000000UL:
3167
        size = 0x8;
3168
        break;
3169
    case 0x10000000UL:
3170
        size = 0x9;
3171
        break;
3172
    case 0x40000000UL:
3173
        size = 0xA;
3174
        break;
3175
#if defined (TARGET_PPC64)
3176
    case 0x000100000000ULL:
3177
        size = 0xB;
3178
        break;
3179
    case 0x000400000000ULL:
3180
        size = 0xC;
3181
        break;
3182
    case 0x001000000000ULL:
3183
        size = 0xD;
3184
        break;
3185
    case 0x004000000000ULL:
3186
        size = 0xE;
3187
        break;
3188
    case 0x010000000000ULL:
3189
        size = 0xF;
3190
        break;
3191
#endif
3192
    default:
3193
        size = -1;
3194
        break;
3195
    }
3196

    
3197
    return size;
3198
}
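/* Example (annotation only): the BookE SIZE encoding grows by a factor of 4
 * per step, so booke_tlb_to_page_size() maps 0 -> 1 KB, 1 -> 4 KB,
 * 3 -> 64 KB and 0x9 -> 256 MB, and booke_page_size_to_tlb() is its exact
 * inverse for the sizes listed in the switch above.
 */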
3199

    
3200
/* Helpers for 4xx TLB management */
3201
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3202
{
3203
    ppcemb_tlb_t *tlb;
3204
    target_ulong ret;
3205
    int size;
3206

    
3207
    entry &= 0x3F;
3208
    tlb = &env->tlb[entry].tlbe;
3209
    ret = tlb->EPN;
3210
    if (tlb->prot & PAGE_VALID)
3211
        ret |= 0x400;
3212
    size = booke_page_size_to_tlb(tlb->size);
3213
    if (size < 0 || size > 0x7)
3214
        size = 1;
3215
    ret |= size << 7;
3216
    env->spr[SPR_40x_PID] = tlb->PID;
3217
    return ret;
3218
}
3219

    
3220
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3221
{
3222
    ppcemb_tlb_t *tlb;
3223
    target_ulong ret;
3224

    
3225
    entry &= 0x3F;
3226
    tlb = &env->tlb[entry].tlbe;
3227
    ret = tlb->RPN;
3228
    if (tlb->prot & PAGE_EXEC)
3229
        ret |= 0x200;
3230
    if (tlb->prot & PAGE_WRITE)
3231
        ret |= 0x100;
3232
    return ret;
3233
}
3234

    
3235
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3236
{
3237
    ppcemb_tlb_t *tlb;
3238
    target_ulong page, end;
3239

    
3240
#if defined (DEBUG_SOFTWARE_TLB)
3241
    if (loglevel != 0) {
3242
        fprintf(logfile, "%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3243
    }
3244
#endif
3245
    entry &= 0x3F;
3246
    tlb = &env->tlb[entry].tlbe;
3247
    /* Invalidate previous TLB (if it's valid) */
3248
    if (tlb->prot & PAGE_VALID) {
3249
        end = tlb->EPN + tlb->size;
3250
#if defined (DEBUG_SOFTWARE_TLB)
3251
        if (loglevel != 0) {
3252
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
3253
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3254
        }
3255
#endif
3256
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3257
            tlb_flush_page(env, page);
3258
    }
3259
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3260
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3261
     * If this ever occurs, one should use the ppcemb target instead
3262
     * of the ppc or ppc64 one
3263
     */
3264
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3265
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3266
                  "are not supported (%d)\n",
3267
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3268
    }
3269
    tlb->EPN = val & ~(tlb->size - 1);
3270
    if (val & 0x40)
3271
        tlb->prot |= PAGE_VALID;
3272
    else
3273
        tlb->prot &= ~PAGE_VALID;
3274
    if (val & 0x20) {
3275
        /* XXX: TO BE FIXED */
3276
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
3277
    }
3278
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3279
    tlb->attr = val & 0xFF;
3280
#if defined (DEBUG_SOFTWARE_TLB)
3281
    if (loglevel != 0) {
3282
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3283
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3284
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3285
                tlb->prot & PAGE_READ ? 'r' : '-',
3286
                tlb->prot & PAGE_WRITE ? 'w' : '-',
3287
                tlb->prot & PAGE_EXEC ? 'x' : '-',
3288
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3289
    }
3290
#endif
3291
    /* Invalidate new TLB (if valid) */
3292
    if (tlb->prot & PAGE_VALID) {
3293
        end = tlb->EPN + tlb->size;
3294
#if defined (DEBUG_SOFTWARE_TLB)
3295
        if (loglevel != 0) {
3296
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
3297
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3298
        }
3299
#endif
3300
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3301
            tlb_flush_page(env, page);
3302
    }
3303
}
3304

    
3305
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
3306
{
3307
    ppcemb_tlb_t *tlb;
3308

    
3309
#if defined (DEBUG_SOFTWARE_TLB)
3310
    if (loglevel != 0) {
3311
        fprintf(logfile, "%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
3312
    }
3313
#endif
3314
    entry &= 0x3F;
3315
    tlb = &env->tlb[entry].tlbe;
3316
    tlb->RPN = val & 0xFFFFFC00;
3317
    tlb->prot = PAGE_READ;
3318
    if (val & 0x200)
3319
        tlb->prot |= PAGE_EXEC;
3320
    if (val & 0x100)
3321
        tlb->prot |= PAGE_WRITE;
3322
#if defined (DEBUG_SOFTWARE_TLB)
3323
    if (loglevel != 0) {
3324
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3325
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3326
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3327
                tlb->prot & PAGE_READ ? 'r' : '-',
3328
                tlb->prot & PAGE_WRITE ? 'w' : '-',
3329
                tlb->prot & PAGE_EXEC ? 'x' : '-',
3330
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3331
    }
3332
#endif
3333
}
3334

    
3335
target_ulong helper_4xx_tlbsx (target_ulong address)
3336
{
3337
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
3338
}
3339

    
3340
/* PowerPC 440 TLB management */
3341
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s word %d entry %d value " ADDRX "\n",
                __func__, word, (int)entry, value);
    }
#endif
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

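/* tlbre is intended as the inverse of the tlbwe decode above: it rebuilds
 * the requested TLB word from the cached ppcemb_tlb_t fields.  Note that
 * reading word 0 also deposits the entry's PID into the low byte of MMUCR,
 * mirroring where tlbwe fetched it from.  Page sizes that cannot be encoded
 * in the 4-bit size field are reported as encoding 1; this fallback comes
 * from the code below and may not match real hardware behaviour.
 */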
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}

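/* As with the 4xx variant above, tlbsx searches the software TLB, but the
 * PID used for matching is taken from the low byte of MMUCR rather than
 * from a dedicated PID register.
 */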
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */