/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"
#include "qemu-timer.h"

/*****************************************************************************/
/* Exceptions processing helpers */

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

static void do_restore_state(void *retaddr)
{
    unsigned long pc = (unsigned long)retaddr;

    if (pc) {
        TranslationBlock *tb = tb_find_pc(pc);
        if (tb) {
            cpu_restore_state(tb, env, pc);
        }
    }
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
static void QEMU_NORETURN dynamic_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    do_restore_state(GETPC());
    cpu_loop_exit();
}

static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(EXCP_ARITH, 0);
}

uint64_t helper_load_pcc (void)
{
    /* ??? This isn't a timer for which we have any rate info.  */
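    /* Architecturally, PCC packs a 32-bit cycle counter into its low
       half and a process-specific offset into its high half; only the
       counter is modeled here.  */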
    return (uint32_t)cpu_get_real_ticks();
}

uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}

uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
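    /* Signed overflow occurred iff both operands had the same sign and
       the sum's sign differs; the xor expression below tests exactly
       that condition on bit 63.  */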
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        arith_excp(EXC_M_IOV, 0);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
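    /* The unsigned test (th + 1) > 1 is a branch-free check for
       th being neither 0 nor -1.  */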
    if (unlikely((th + 1) > 1)) {
        arith_excp(EXC_M_IOV, 0);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

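    /* Each bit of mskb selects one byte: multiplying the extracted
       0/1 bit by 0xFF expands it into a full byte of the mask.  */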
    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

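/* CMPBGE: compare the eight bytes of op1 and op2 as unsigned values,
   setting result bit i iff byte i of op1 is >= byte i of op2.  */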
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}

uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

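/* PERR: sum of the absolute differences of the eight unsigned bytes
   (the MVI pixel-error instruction).  */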
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            opr = opa - opb;
        else
            opr = opb - opa;
        res += opr;
    }
    return res;
}

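/* PKLB/PKWB pack the low bytes of the longwords/words of op1 into
   adjacent low bytes; UNPKBL/UNPKBW perform the inverse expansion.  */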
uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}

/* Floating point helpers */

void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}

/* Raise exceptions for ieee fp insns without software completion.
   In that case there are no exceptions that don't trap; the mask
   doesn't apply.  */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }

        arith_excp(hw_exc, 1ull << regno);
    }
}

/* Raise exceptions for ieee fp insns with software completion.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
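    /* Record the exception in the FPCR status bits, then trap only on
       those exceptions not disabled by the FPCR exception mask.  */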
    if (exc) {
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}

/* Input remapping without software completion.  Handle denormal-map-to-zero
   and trap for all other non-finite numbers.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
    }
    return val;
}

/* Similar, but does not trap for infinities.  Used for comparisons.  */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        arith_excp(EXC_M_INV, 0);
    }
    return val;
}

/* Input remapping with software completion enabled.  All we have to do
   is handle denormal-map-to-zero; all other inputs get exceptions as
   needed from the actual operation.  */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            val &= 1ull << 63;
        }
    }
    return val;
}

/* F floating (VAX) */
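/* VAX F uses an exponent bias of 128 with a 0.1f fraction convention,
   against IEEE single's bias of 127 with 1.f; that difference is why
   the stored exponent is adjusted by 2 in the conversions below.  */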
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}

static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

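/* The 32-bit memory form of an F float does not simply truncate the
   64-bit register form; these helpers shuffle the sign, exponent and
   fraction fields between the two layouts.  */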
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}

static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

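/* The memory form of a G float reverses the order of the four 16-bit
   words of the register form.  The permutation is its own inverse, so
   both directions use the same code.  */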
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f)
            exp = 0x7ff;
    } else {
        if (exp_low != 0x00)
            exp |= 0x380;
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}

static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}

static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Comparisons */
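/* Alpha FP compares write 2.0 (0x4000000000000000) for true and
   0 for false.  */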
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised.  */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}

uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

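/* Chopped conversion: round toward zero with overflow and inexact
   reporting suppressed (VI == 0).  */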
uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

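/* Chopped conversion with overflow and inexact reporting still
   enabled (VI == 1), as used with software completion.  */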
uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_ret (uint64_t a)
{
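    /* Bit 0 of the target selects PALmode; the low bits are not part
       of the PC.  Returning also discards the interrupt flag and any
       outstanding lock.  */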
    env->pc = a & ~3;
    env->pal_mode = a & 1;
    env->intr_flag = 0;
    env->lock_addr = -1;
}
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)
uint64_t helper_ldl_phys(uint64_t p)
{
    return (int32_t)ldl_phys(p);
}

uint64_t helper_ldq_phys(uint64_t p)
{
    return ldq_phys(p);
}

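/* Load-locked: remember the address and the value read so that the
   matching store-conditional can detect intervening stores.  */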
uint64_t helper_ldl_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = (int32_t)ldl_phys(p);
}

uint64_t helper_ldq_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = ldq_phys(p);
}

void helper_stl_phys(uint64_t p, uint64_t v)
{
    stl_phys(p, v);
}

void helper_stq_phys(uint64_t p, uint64_t v)
{
    stq_phys(p, v);
}

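/* Store-conditional: succeed (return 1) only if this is the locked
   address and memory still holds the value read by the earlier
   load-locked; the lock is always cleared.  */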
uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        int32_t old = ldl_phys(p);
        if (old == (int32_t)env->lock_value) {
            stl_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}

uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        uint64_t old = ldq_phys(p);
        if (old == env->lock_value) {
            stq_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}

#define MMUSUFFIX _mmu

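/* Instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte
   accesses.  */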
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c).  */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *saved_env;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        do_restore_state(retaddr);
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif