Statistics
| Branch: | Revision:

root / target-alpha / op_helper.c @ a4d2d1a0

History | View | Annotate | Download (30.3 kB)

1
/*
2
 *  Alpha emulation cpu micro-operations helpers for qemu.
3
 *
4
 *  Copyright (c) 2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19

    
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "softfloat.h"
23
#include "helper.h"
24
#include "qemu-timer.h"
25

    
26
/*****************************************************************************/
27
/* Exceptions processing helpers */
28
/* Raise exception EXCP with auxiliary ERROR code: record both in the
   CPU state and longjmp back to the main execution loop.  Never returns.  */
void QEMU_NORETURN helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

/* Read the process cycle counter.  Only the low 32 bits are
   architecturally valid; they are returned zero-extended.  */
uint64_t helper_load_pcc (void)
{
    /* ??? This isn't a timer for which we have any rate info.  */
    return (uint32_t)cpu_get_real_ticks();
}

/* Read the floating-point control register from the CPU state.  */
uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

/* Write VAL to the floating-point control register.  */
void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}
50

    
51
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
52
{
53
    uint64_t tmp = op1;
54
    op1 += op2;
55
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
56
        helper_excp(EXCP_ARITH, EXC_M_IOV);
57
    }
58
    return op1;
59
}
60

    
61
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
62
{
63
    uint64_t tmp = op1;
64
    op1 = (uint32_t)(op1 + op2);
65
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
66
        helper_excp(EXCP_ARITH, EXC_M_IOV);
67
    }
68
    return op1;
69
}
70

    
71
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
72
{
73
    uint64_t res;
74
    res = op1 - op2;
75
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
76
        helper_excp(EXCP_ARITH, EXC_M_IOV);
77
    }
78
    return res;
79
}
80

    
81
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
82
{
83
    uint32_t res;
84
    res = op1 - op2;
85
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
86
        helper_excp(EXCP_ARITH, EXC_M_IOV);
87
    }
88
    return res;
89
}
90

    
91
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
92
{
93
    int64_t res = (int64_t)op1 * (int64_t)op2;
94

    
95
    if (unlikely((int32_t)res != res)) {
96
        helper_excp(EXCP_ARITH, EXC_M_IOV);
97
    }
98
    return (int64_t)((int32_t)res);
99
}
100

    
101
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
102
{
103
    uint64_t tl, th;
104

    
105
    muls64(&tl, &th, op1, op2);
106
    /* If th != 0 && th != -1, then we had an overflow */
107
    if (unlikely((th + 1) > 1)) {
108
        helper_excp(EXCP_ARITH, EXC_M_IOV);
109
    }
110
    return tl;
111
}
112

    
113
/* Unsigned 64x64 -> 128-bit multiply; return the high 64 bits.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}
120

    
121
/* Population count (number of set bits) of the 64-bit operand.  */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

/* Count leading zero bits of the 64-bit operand.  */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

/* Count trailing zero bits of the 64-bit operand.  */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
135

    
136
/* Clear every byte lane of OP whose corresponding bit is set in MSKB.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int lane;

    for (lane = 0; lane < 8; lane++) {
        if (mskb & (1 << lane)) {
            mask |= 0xffull << (lane * 8);
        }
    }
    return op & ~mask;
}
152

    
153
/* ZAP: clear the bytes of VAL selected by the low 8 bits of MASK.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: keep only the bytes of VAL selected by the low 8 bits of MASK.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
162

    
163
/* CMPBGE: set result bit N when the unsigned byte in lane N of OP1 is
   greater than or equal to the corresponding byte of OP2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int lane;

    for (lane = 7; lane >= 0; lane--) {
        uint8_t ba = op1 >> (lane * 8);
        uint8_t bb = op2 >> (lane * 8);
        res = (res << 1) | (ba >= bb);
    }
    return res;
}
177

    
178
/* Byte-wise unsigned minimum of the two operands.  */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t out = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;
        out |= (uint64_t)(a <= b ? a : b) << shift;
    }
    return out;
}
192

    
193
/* Byte-wise signed minimum of the two operands.  */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t out = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t a = op1 >> shift;
        int8_t b = op2 >> shift;
        out |= (uint64_t)(uint8_t)(a <= b ? a : b) << shift;
    }
    return out;
}
208

    
209
/* Word-wise (16-bit) unsigned minimum of the two operands.  */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t out = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t a = op1 >> shift;
        uint16_t b = op2 >> shift;
        out |= (uint64_t)(a <= b ? a : b) << shift;
    }
    return out;
}
223

    
224
/* Word-wise (16-bit) signed minimum of the two operands.  */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t out = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t a = op1 >> shift;
        int16_t b = op2 >> shift;
        out |= (uint64_t)(uint16_t)(a <= b ? a : b) << shift;
    }
    return out;
}
239

    
240
/* Byte-wise unsigned maximum of the two operands.  */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t out = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;
        out |= (uint64_t)(a >= b ? a : b) << shift;
    }
    return out;
}
254

    
255
/* Byte-wise signed maximum of the two operands.  */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t out = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t a = op1 >> shift;
        int8_t b = op2 >> shift;
        out |= (uint64_t)(uint8_t)(a >= b ? a : b) << shift;
    }
    return out;
}
270

    
271
/* Word-wise (16-bit) unsigned maximum of the two operands.  */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t out = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t a = op1 >> shift;
        uint16_t b = op2 >> shift;
        out |= (uint64_t)(a >= b ? a : b) << shift;
    }
    return out;
}
285

    
286
/* Word-wise (16-bit) signed maximum of the two operands.  */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t out = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t a = op1 >> shift;
        int16_t b = op2 >> shift;
        out |= (uint64_t)(uint16_t)(a >= b ? a : b) << shift;
    }
    return out;
}
301

    
302
/* PERR: sum of absolute byte-wise differences of the two operands.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t acc = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;
        acc += a < b ? b - a : a - b;
    }
    return acc;
}
319

    
320
/* PKLB: pack the low byte of each longword into the two low bytes.  */
uint64_t helper_pklb (uint64_t op1)
{
    uint64_t lo = op1 & 0xff;
    uint64_t hi = (op1 >> 32) & 0xff;
    return lo | (hi << 8);
}
324

    
325
/* PKWB: pack the low byte of each 16-bit word into the four low bytes.  */
uint64_t helper_pkwb (uint64_t op1)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((op1 >> (i * 16)) & 0xff) << (i * 8);
    }
    return r;
}
332

    
333
/* UNPKBL: spread the two low bytes into the low byte of each longword.  */
uint64_t helper_unpkbl (uint64_t op1)
{
    uint64_t b0 = op1 & 0xff;
    uint64_t b1 = (op1 >> 8) & 0xff;
    return b0 | (b1 << 32);
}
337

    
338
/* UNPKBW: spread the four low bytes into the low byte of each word.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((op1 >> (i * 8)) & 0xff) << (i * 16);
    }
    return r;
}
345

    
346
/* Floating point helpers */
347

    
348
/* Set the softfloat rounding mode used by subsequent FP helpers.  */
void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

/* Enable or disable flushing of underflowed results to zero.  */
void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

/* Clear the accumulated softfloat exception flags.  */
void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

/* Return the accumulated softfloat exception flags.  */
uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}
367

    
368
/* Raise exceptions for ieee fp insns without software completion.
369
   In that case there are no exceptions that don't trap; the mask
370
   doesn't apply.  */
371
/* Raise an arithmetic trap for the softfloat exception bits EXC against
   destination register REGNO.  Used by ieee fp insns without software
   completion; the FPCR exception mask does not apply here.  */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        /* Record the faulting destination register for PALcode.  */
        env->ipr[IPR_EXC_MASK] |= 1ull << regno;

        /* Translate softfloat flag bits to hardware EXC_M_* bits.  */
        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }
        helper_excp(EXCP_ARITH, hw_exc);
    }
}
396

    
397
/* Raise exceptions for ieee fp insns with software completion.  */
398
/* Raise exceptions for ieee fp insns with software completion: always
   accumulate the exception bits into the FPCR status, but only trap for
   exception kinds not masked in the FPCR.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}
409

    
410
/* Input remapping without software completion.  Handle denormal-map-to-zero
411
   and trap for all other non-finite numbers.  */
412
/* Remap an IEEE double input operand for insns without software
   completion: flush denormals to zero when FPCR.DNZ is set, otherwise
   trap on denormals; trap on infinities and NaNs.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;    /* keep only the sign bit */
            } else {
                helper_excp(EXCP_ARITH, EXC_M_UNF);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        helper_excp(EXCP_ARITH, frac ? EXC_M_INV : EXC_M_FOV);
    }
    return val;
}
435

    
436
/* Similar, but does not trap for infinities.  Used for comparisons.  */
437
/* Like helper_ieee_input, but used for comparisons: infinities are
   ordered and therefore do not trap; only NaNs do.  */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;    /* keep only the sign bit */
            } else {
                helper_excp(EXCP_ARITH, EXC_M_UNF);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        helper_excp(EXCP_ARITH, EXC_M_INV);
    }
    return val;
}
457

    
458
/* Input remapping with software completion enabled.  All we have to do
459
   is handle denormal-map-to-zero; all other inputs get exceptions as
460
   needed from the actual operation.  */
461
/* Input remapping with software completion enabled: only handle
   denormal-map-to-zero; all other inputs get exceptions as needed from
   the actual operation.  */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            /* Denormal or zero: squash to signed zero.  */
            val &= 1ull << 63;
        }
    }
    return val;
}
471

    
472
/* F floating (VAX) */
473
/* Convert an IEEE single to the 64-bit VAX F-float register image:
   sign to bit 63, exponent rebased by 2 into bits 52..62, mantissa
   left-aligned into bits 29..51.  */
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number.  BUG FIX: the mantissa was previously
               dropped here, flattening every normal value to a power
               of two; include it as in the denormal case.  */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
505

    
506
/* Convert a VAX F-float register image back to an IEEE single.
   Raises OPCDEC on reserved operands (dirty zeros); tiny exponents
   underflow to zero.  */
static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        /* Undo the +2 exponent bias applied by float32_to_f.  */
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
528

    
529
/* Convert an F-float register image to its 32-bit in-memory layout.  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    /* Register bits 29..44 become memory bits 16..31.  */
    r = (uint32_t)(a >> 13) & 0xffff0000;
    /* Register bits 45..58 become memory bits 0..13.  */
    r |= (uint32_t)(a >> 45) & 0x3fff;
    /* Register bits 62..63 become memory bits 14..15.  */
    r |= (uint32_t)(a >> 48) & 0xc000;
    return r;
}
537

    
538
/* Convert a 32-bit in-memory F-float to its register image.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;

    /* Memory bits 14..15 -> register bits 62..63.  */
    r = (uint64_t)(a & 0x0000c000) << 48;
    /* Memory low bits -> register bits 45 upward (bits shifted past
       bit 63 are discarded, exactly as in the original layout).  */
    r |= (uint64_t)(a & 0x003fffff) << 45;
    /* Memory bits 16..31 -> register bits 29..44.  */
    r |= (uint64_t)(a & 0xffff0000) << 13;
    /* Replicate the complement of memory bit 14 into bits 59..61.  */
    if ((a & 0x00004000) == 0) {
        r |= 0x7ull << 59;
    }
    return r;
}
548

    
549
/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
550
   either implement VAX arithmetic properly or just signal invalid opcode.  */
551

    
552
/* VAX F add, implemented with IEEE single-precision arithmetic.  */
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* VAX F subtract.  */
uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* VAX F multiply.  */
uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* VAX F divide.  */
uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* VAX F square root.  */
uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}
600

    
601

    
602
/* G floating (VAX) */
603
/* Convert an IEEE double to the 64-bit VAX G-float register image:
   same field layout, exponent rebased by 2.  */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number.  BUG FIX: the mantissa was previously
               dropped here, flattening every normal value to a power
               of two; include it as in the denormal case.  */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
635

    
636
/* Convert a VAX G-float register image back to an IEEE double.
   Raises OPCDEC on reserved operands (dirty zeros); tiny exponents
   underflow to zero.  */
static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        /* Undo the +2 exponent bias applied by float64_to_g.  */
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}
658

    
659
/* Convert a G-float register image to its in-memory layout by swapping
   the four 16-bit words end-for-end.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((a >> (i * 16)) & 0xffffull) << ((3 - i) * 16);
    }
    return r;
}
668

    
669
/* Convert an in-memory G-float to its register image: the same
   word-reversal permutation as helper_g_to_memory (it is an involution).  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t w0 = a & 0xffffull;
    uint64_t w1 = (a >> 16) & 0xffffull;
    uint64_t w2 = (a >> 32) & 0xffffull;
    uint64_t w3 = a >> 48;

    return (w0 << 48) | (w1 << 32) | (w2 << 16) | w3;
}
678

    
679
/* VAX G add, implemented with IEEE double-precision arithmetic.  */
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* VAX G subtract.  */
uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* VAX G multiply.  */
uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* VAX G divide.  */
uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* VAX G square root.  */
uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}
727

    
728

    
729
/* S floating (single) */
730

    
731
/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
732
/* Expand IEEE single-precision bits FI into the 64-bit S-float register
   image: sign to bit 63, 8-bit exponent remapped to 11 bits, fraction
   left-aligned.  Mirrors linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint64_t sign = fi >> 31;
    uint64_t frac = fi & 0x7fffff;
    uint32_t exp8 = (fi >> 23) & 0xff;
    uint64_t exp11;

    if (exp8 == 0xff) {
        exp11 = 0x7ff;                  /* Inf/NaN */
    } else if (exp8 == 0) {
        exp11 = 0;                      /* zero/denormal */
    } else if (exp8 & 0x80) {
        exp11 = 0x400 | (exp8 & 0x7f);  /* high half of the range */
    } else {
        exp11 = 0x380 | exp8;           /* low half of the range */
    }

    return (sign << 63) | (exp11 << 52) | (frac << 29);
}
753

    
754
/* Convert a float32 value to its S-float register image via its raw bits.  */
static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}
760

    
761
/* Collapse a 64-bit S-float register image back to IEEE single bits:
   sign and exponent msb come from bits 62..63, the rest from bits 29..58.  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    uint32_t hi = (uint32_t)(a >> 32) & 0xc0000000;
    uint32_t lo = (uint32_t)(a >> 29) & 0x3fffffff;
    return hi | lo;
}
765

    
766
/* Convert an S-float register image to a float32 value via its raw bits.  */
static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}
772

    
773
/* S-float register image -> 32-bit in-memory (IEEE single) format.  */
uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

/* 32-bit in-memory (IEEE single) format -> S-float register image.  */
uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}
782

    
783
/* IEEE S (single) add on register-image operands.  */
uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* IEEE S subtract.  */
uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* IEEE S multiply.  */
uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* IEEE S divide.  */
uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* IEEE S square root.  */
uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}
831

    
832

    
833
/* T floating (double) */
834
/* T-float register image -> float64: the layouts are identical, so this
   is a pure bit copy through a union.  */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

/* float64 -> T-float register image (identical layouts, bit copy).  */
static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}
849

    
850
/* IEEE T (double) add.  */
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* IEEE T subtract.  */
uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* IEEE T multiply.  */
uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* IEEE T divide.  */
uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* IEEE T square root.  */
uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}
898

    
899
/* Comparisons */
900
/* T-float comparisons.  Each returns 0x4000000000000000 (the T-float
   encoding of 2.0, the canonical "true") when the predicate holds,
   else 0.  CMPTUN tests for unordered (either operand NaN) without
   signalling on quiet NaNs.  */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

/* CMPTEQ: true when a == b.  */
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLE: true when a <= b.  */
uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLT: true when a < b.  */
uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
952

    
953
/* G-float comparisons; same true/false encoding as the T-float
   comparisons above (0x4000000000000000 for true, 0 for false).  */

/* CMPGEQ: true when a == b.  */
uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLE: true when a <= b.  */
uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLT: true when a < b.  */
uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
991

    
992
/* Floating point format conversion */
993
/* CVTTS: convert T (double) to S (single).  */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

/* CVTST: convert S (single) to T (double).  */
uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQS: convert a signed 64-bit integer to S (single).  */
uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}
1018

    
1019
/* Implement float64 to uint64 conversion without saturation -- we must
1020
   supply the truncated result.  This behaviour is used by the compiler
1021
   to get unsigned conversion for free with the same instruction.
1022

1023
   The VI flag is set when overflow or inexact exceptions should be raised.  */
1024

    
1025
/* Convert the T-float (IEEE double) image A to a 64-bit integer,
   truncating rather than saturating on overflow, honouring ROUNDMODE.
   When VI is nonzero, overflow and inexact softfloat flags are raised
   via float_raise; otherwise only invalid (NaN input) is reported.  */
static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    /* Decompose the double into sign, biased exponent, fraction.  */
    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            /* Denormal: rounds via the sticky-bit path below.  */
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        /* NaN is invalid; infinity overflows (reported only with VI).  */
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                /* A nonzero ROUND means the result is inexact.  */
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        /* Apply the sign last, as two's complement negation.  */
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}
1106

    
1107
/* CVTTQ: T-float to quadword using the current rounding mode, with
   overflow/inexact reporting enabled.  */
uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

/* CVTTQ/C: chopped (round-to-zero) variant, no overflow/inexact.  */
uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

/* CVTTQ/SVIC: chopped, with overflow/inexact reporting enabled.  */
uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}
1121

    
1122
/* CVTQT: signed quadword to T (double).  */
uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQF: signed quadword to VAX F.  */
uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGF: VAX G to VAX F, via IEEE double -> single.  */
uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGQ: VAX G to signed quadword, truncating toward zero.  */
uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

/* CVTQG: signed quadword to VAX G.  */
uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}
1156

    
1157
/* PALcode support special instructions */
1158
#if !defined (CONFIG_USER_ONLY)
1159
void helper_hw_rei (void)
1160
{
1161
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
1162
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
1163
    env->intr_flag = 0;
1164
    env->lock_addr = -1;
1165
    /* XXX: re-enable interrupts and memory mapping */
1166
}
1167

    
1168
/* HW_RET: like HW_REI but the return target comes from a register
   operand rather than from EXC_ADDR.  Bit 0 of the operand becomes
   the new PALmode flag in EXC_ADDR.  */
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    env->intr_flag = 0;
    /* Returning from the exception cancels any lock reservation.  */
    env->lock_addr = -1;
    /* XXX: re-enable interrupts and memory mapping */
}
1176

    
1177
/* MFPR: read internal processor register IPRN.  If the read fails,
   the caller's previous value VAL is returned unchanged so the
   destination register keeps its old contents.  */
uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t result;

    if (cpu_alpha_mfpr(env, iprn, &result) == 0) {
        return result;
    }
    return val;
}
1186

    
1187
/* MTPR: write VAL to internal processor register IPRN.  The final
   NULL means no old-value feedback is requested from the write.  */
void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}
1191

    
1192
void helper_set_alt_mode (void)
1193
{
1194
    env->saved_mode = env->ps & 0xC;
1195
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
1196
}
1197

    
1198
/* Undo helper_set_alt_mode: restore the saved mode field (bits 2-3)
   into PS.  */
void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}
1202

    
1203
#endif
1204

    
1205
/*****************************************************************************/
1206
/* Softmmu support */
1207
#if !defined (CONFIG_USER_ONLY)
1208

    
1209
/* XXX: the two following helpers are pure hacks.
1210
 *      Hopefully, we emulate the PALcode, then we should never see
1211
 *      HW_LD / HW_ST instructions.
1212
 */
1213
/* Translate VIRTADDR for a read access by probing the softmmu TLB in
   the current MMU mode; on a miss the TLB is filled (which may raise
   a fault) and the probe is retried.  Returns the host-addend-adjusted
   address, matching the original goto-redo lookup.  */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    int mmu_idx = cpu_mmu_index(env);
    int slot = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    uint64_t tlb_addr;

    for (;;) {
        tlb_addr = env->tlb_table[mmu_idx][slot].addr_read;
        if ((virtaddr & TARGET_PAGE_MASK) ==
            (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
            /* Hit: apply the host addend recorded in the TLB entry.  */
            return virtaddr + env->tlb_table[mmu_idx][slot].addend;
        }
        /* Miss: fill the entry and retry the lookup.  */
        tlb_fill(virtaddr, 0, mmu_idx, GETPC());
    }
}
1234

    
1235
/* Same as helper_ld_virt_to_phys but probes the write permission
   (addr_write) and fills the TLB with is_write = 1.  */
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    int mmu_idx = cpu_mmu_index(env);
    int slot = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    uint64_t tlb_addr;

    for (;;) {
        tlb_addr = env->tlb_table[mmu_idx][slot].addr_write;
        if ((virtaddr & TARGET_PAGE_MASK) ==
            (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
            /* Hit: apply the host addend recorded in the TLB entry.  */
            return virtaddr + env->tlb_table[mmu_idx][slot].addend;
        }
        /* Miss: fill the entry for a write access and retry.  */
        tlb_fill(virtaddr, 1, mmu_idx, GETPC());
    }
}
1256

    
1257
/* Longword load from raw (untranslated) memory at address t1.
   NOTE(review): this passes two operands to ldl_raw, so it relies on
   the old two-operand op-macro form writing into t0's backing
   register — confirm against the access macros in use.  */
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}
1261

    
1262
/* Quadword load from raw (untranslated) memory at address t1; see
   the note on helper_ldl_raw about the two-operand macro form.  */
void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}
1266

    
1267
/* LDL_L on raw memory: record t1 as the locked address for a later
   store-conditional, then perform the longword load.  */
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}
1272

    
1273
/* LDQ_L on raw memory: record t1 as the locked address for a later
   store-conditional, then perform the quadword load.
   Fix: previously called ldl_raw (32-bit load) for this 64-bit
   load-locked — an obvious copy-paste slip from helper_ldl_l_raw;
   use ldq_raw to match helper_ldq_raw.  */
void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}
1278

    
1279
/* Longword load from address t1 using the kernel-mode MMU index.  */
void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}
1283

    
1284
/* Quadword load from address t1 using the kernel-mode MMU index.  */
void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}
1288

    
1289
/* Longword load from address t1 using the data-access MMU index.  */
void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}
1293

    
1294
/* Quadword load from address t1 using the data-access MMU index.  */
void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}
1298

    
1299
/* Longword store of t0 to raw (untranslated) memory at address t1.  */
void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}
1303

    
1304
/* Quadword store of t0 to raw (untranslated) memory at address t1.  */
void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}
1308

    
1309
/* STL_C on raw memory: store t0 at t1 only if t1 still matches the
   address captured by the last load-locked.  Returns 0 on success,
   1 on failure; the reservation is consumed either way.  */
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t failed = 1;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        failed = 0;
    }
    /* Clear the reservation regardless of the outcome.  */
    env->lock = 1;
    return failed;
}
1323

    
1324
/* STQ_C on raw memory: quadword version of helper_stl_c_raw.
   Returns 0 on success, 1 on failure; the reservation is consumed
   either way.  */
uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t failed = 1;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        failed = 0;
    }
    /* Clear the reservation regardless of the outcome.  */
    env->lock = 1;
    return failed;
}
1338

    
1339
#define MMUSUFFIX _mmu
1340

    
1341
#define SHIFT 0
1342
#include "softmmu_template.h"
1343

    
1344
#define SHIFT 1
1345
#include "softmmu_template.h"
1346

    
1347
#define SHIFT 2
1348
#include "softmmu_template.h"
1349

    
1350
#define SHIFT 3
1351
#include "softmmu_template.h"
1352

    
1353
/* try to fill the TLB and return an exception if error. If retaddr is
1354
   NULL, it means that the function was called in C code (i.e. not
1355
   from generated code or from helper.c) */
1356
/* XXX: fix it to restore all registers */
1357
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *saved_env;
    int fault;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    fault = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(fault != 0)) {
        if (likely(retaddr != NULL)) {
            /* A real CPU fault raised from translated code: rewind the
               CPU state to the guest instruction that caused it.  */
            unsigned long host_pc = (unsigned long)retaddr;
            TranslationBlock *tb = tb_find_pc(host_pc);
            if (likely(tb != NULL)) {
                /* the PC is inside the translated code. It means that we
                   have a virtual CPU fault */
                cpu_restore_state(tb, env, host_pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
1385

    
1386
#endif