Statistics
| Branch: | Revision:

root / target-alpha / op_helper.c @ 248c42f3

History | View | Annotate | Download (26.5 kB)

1
/*
2
 *  Alpha emulation cpu micro-operations helpers for qemu.
3
 *
4
 *  Copyright (c) 2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19

    
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "softfloat.h"
23
#include "helper.h"
24

    
25
/*****************************************************************************/
26
/* Exceptions processing helpers */
27
void helper_excp (int excp, int error)
28
{
29
    env->exception_index = excp;
30
    env->error_code = error;
31
    cpu_loop_exit();
32
}
33

    
34
uint64_t helper_load_pcc (void)
35
{
36
    /* XXX: TODO */
37
    return 0;
38
}
39

    
40
uint64_t helper_load_fpcr (void)
41
{
42
    return cpu_alpha_load_fpcr (env);
43
}
44

    
45
void helper_store_fpcr (uint64_t val)
46
{
47
    cpu_alpha_store_fpcr (env, val);
48
}
49

    
50
static spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;
51

    
52
uint64_t helper_rs(void)
53
{
54
    uint64_t tmp;
55

    
56
    spin_lock(&intr_cpu_lock);
57
    tmp = env->intr_flag;
58
    env->intr_flag = 1;
59
    spin_unlock(&intr_cpu_lock);
60

    
61
    return tmp;
62
}
63

    
64
uint64_t helper_rc(void)
65
{
66
    uint64_t tmp;
67

    
68
    spin_lock(&intr_cpu_lock);
69
    tmp = env->intr_flag;
70
    env->intr_flag = 0;
71
    spin_unlock(&intr_cpu_lock);
72

    
73
    return tmp;
74
}
75

    
76
/* ADDQ/V: 64-bit add, trapping on signed overflow.  */
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t res = op1 + op2;

    /* Overflow iff both operands share a sign and the result's sign
       differs from it.  */
    if (unlikely(~(op1 ^ op2) & (op1 ^ res) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

/* ADDL/V: 32-bit add, trapping on signed overflow.  */
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t res = (uint32_t)(op1 + op2);

    if (unlikely(~(op1 ^ op2) & (op1 ^ res) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

/* SUBQ/V: 64-bit subtract, trapping on signed overflow.  */
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res = op1 - op2;

    /* Overflow iff the operands' signs differ and the result's sign
       differs from the minuend's.  */
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

/* SUBL/V: 32-bit subtract, trapping on signed overflow.  */
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res = op1 - op2;

    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

/* MULL/V: 32-bit multiply, trapping when the product does not fit
   in a longword.  */
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t prod = (int64_t)op1 * (int64_t)op2;

    if (unlikely(prod != (int32_t)prod)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)prod);
}

/* MULQ/V: 64-bit multiply, trapping when the signed product does not
   fit in a quadword.  */
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t lo, hi;

    muls64(&lo, &hi, op1, op2);
    /* Any high part other than 0 or -1 means overflow.  */
    if (unlikely(hi != 0 && hi + 1 != 0)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return lo;
}
137

    
138
/* UMULH: high 64 bits of the unsigned 128-bit product.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t lo, hi;

    mulu64(&lo, &hi, op1, op2);
    return hi;
}

/* Population count.  */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

/* Count leading zeros.  */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

/* Count trailing zeros.  */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
160

    
161
/* Clear every byte of OP whose corresponding bit in MSKB is set.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    /* Expand each selected mask bit into a full byte of ones.  */
    for (i = 0; i < 8; i++) {
        if ((mskb >> i) & 1) {
            mask |= 0xffULL << (i * 8);
        }
    }

    return op & ~mask;
}
177

    
178
/* MSKBL: clear the byte at offset mask<2:0>.  */
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    int ofs = mask & 7;
    return byte_zap(val, 0x01 << ofs);
}

/* MSKWL: clear the (low part of the) word at offset mask<2:0>.  */
uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    int ofs = mask & 7;
    return byte_zap(val, 0x03 << ofs);
}

/* MSKLL: clear the (low part of the) longword at offset mask<2:0>.  */
uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    int ofs = mask & 7;
    return byte_zap(val, 0x0F << ofs);
}

/* ZAP: clear the bytes selected by the low 8 bits of mask.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: clear the bytes NOT selected by the low 8 bits of mask.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

/* MSKQL: clear bytes from offset mask<2:0> upward.  */
uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    int ofs = mask & 7;
    return byte_zap(val, 0xFF << ofs);
}
207

    
208
/* MSKWH: clear the high-part bytes touched by a word at byte offset
   mask<2:0>.  */
uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

/* INSWH: position the high part of a word for an unaligned store.
   Bug fix: for byte offset 0 the original computed "val >> 64",
   which is undefined behavior in C (shift count equal to the type
   width).  At offset 0 no bits of val belong to the high part and
   the zap mask below clears every byte, so force the value to 0.  */
uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    int sh = (mask & 7) * 8;

    val = sh ? val >> (64 - sh) : 0;
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

/* MSKLH: as MSKWH for a longword.  */
uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

/* INSLH: as INSWH for a longword (same shift-by-64 fix).  */
uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    int sh = (mask & 7) * 8;

    val = sh ? val >> (64 - sh) : 0;
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

/* MSKQH: as MSKWH for a quadword.  */
uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

/* INSQH: as INSWH for a quadword (same shift-by-64 fix).  */
uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    int sh = (mask & 7) * 8;

    val = sh ? val >> (64 - sh) : 0;
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}
240

    
241
/* CMPBGE: set result bit i when byte i of op1 is >= byte i of op2
   (unsigned byte compare).  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 7; i >= 0; i--) {
        uint8_t a = op1 >> (i * 8);
        uint8_t b = op2 >> (i * 8);
        res = (res << 1) | (a >= b);
    }
    return res;
}
255

    
256
/* MINUB8: byte-wise unsigned minimum.  */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 7; i >= 0; i--) {
        uint8_t a = op1 >> (i * 8);
        uint8_t b = op2 >> (i * 8);
        res = (res << 8) | (a < b ? a : b);
    }
    return res;
}

/* MINSB8: byte-wise signed minimum.  */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 7; i >= 0; i--) {
        int8_t a = op1 >> (i * 8);
        int8_t b = op2 >> (i * 8);
        res = (res << 8) | (uint8_t)(a < b ? a : b);
    }
    return res;
}

/* MINUW4: word-wise unsigned minimum.  */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 3; i >= 0; i--) {
        uint16_t a = op1 >> (i * 16);
        uint16_t b = op2 >> (i * 16);
        res = (res << 16) | (a < b ? a : b);
    }
    return res;
}

/* MINSW4: word-wise signed minimum.  */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 3; i >= 0; i--) {
        int16_t a = op1 >> (i * 16);
        int16_t b = op2 >> (i * 16);
        res = (res << 16) | (uint16_t)(a < b ? a : b);
    }
    return res;
}
317

    
318
/* MAXUB8: byte-wise unsigned maximum.  */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 7; i >= 0; i--) {
        uint8_t a = op1 >> (i * 8);
        uint8_t b = op2 >> (i * 8);
        res = (res << 8) | (a > b ? a : b);
    }
    return res;
}

/* MAXSB8: byte-wise signed maximum.  */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 7; i >= 0; i--) {
        int8_t a = op1 >> (i * 8);
        int8_t b = op2 >> (i * 8);
        res = (res << 8) | (uint8_t)(a > b ? a : b);
    }
    return res;
}

/* MAXUW4: word-wise unsigned maximum.  */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 3; i >= 0; i--) {
        uint16_t a = op1 >> (i * 16);
        uint16_t b = op2 >> (i * 16);
        res = (res << 16) | (a > b ? a : b);
    }
    return res;
}

/* MAXSW4: word-wise signed maximum.  */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 3; i >= 0; i--) {
        int16_t a = op1 >> (i * 16);
        int16_t b = op2 >> (i * 16);
        res = (res << 16) | (uint16_t)(a > b ? a : b);
    }
    return res;
}
379

    
380
/* PERR: sum of absolute byte-wise differences.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t sum = 0;
    int i;

    for (i = 0; i < 8; i++) {
        uint8_t a = op1 >> (i * 8);
        uint8_t b = op2 >> (i * 8);
        sum += a < b ? b - a : a - b;
    }
    return sum;
}
397

    
398
/* PKLB: pack the low byte of each longword into the low word.  */
uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | (((op1 >> 32) & 0xff) << 8);
}

/* PKWB: pack the low byte of each word into the low longword.  */
uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | (((op1 >> 16) & 0xff) << 8)
            | (((op1 >> 32) & 0xff) << 16)
            | (((op1 >> 48) & 0xff) << 24));
}

/* UNPKBL: spread the low two bytes into the two longwords.  */
uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | (((op1 >> 8) & 0xff) << 32);
}

/* UNPKBW: spread the low four bytes into the four words.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | (((op1 >> 8) & 0xff) << 16)
            | (((op1 >> 16) & 0xff) << 32)
            | (((op1 >> 24) & 0xff) << 48));
}
423

    
424
/* Floating point helpers */
425

    
426
/* F floating (VAX) */
427
/* Convert a host float32 to the 64-bit register image of a VAX
   F-float.  Inputs with no F representation (NaN, infinity, exponent
   overflow) are encoded as 1, a VAX "dirty zero" reserved operand
   that traps when consumed by f_to_float32 below.  */
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    /* Pick the IEEE single apart into sign, biased exponent and
       fraction, already positioned for the 64-bit register image.  */
    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number: rebias the exponent by +2 (the inverse
               of the -2 applied in f_to_float32).  */
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}
459

    
460
/* Convert the 64-bit register image of a VAX F-float back to a host
   float32.  Reserved operands (zero exponent with a non-zero
   sign/fraction, i.e. dirty zeros) raise OPCDEC; exponents too small
   for the IEEE single format flush to zero.  */
static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    /* Reassemble the 8-bit exponent and the packed sign+fraction
       from their register-image positions.  */
    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        /* Undo the +2 exponent rebias applied by float32_to_f.  */
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
482

    
483
/* Shuffle the register image of an F-float into its 32-bit in-memory
   (word-swapped) layout.  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    r  = (uint32_t)((a >> 13) & 0xffff0000);
    r |= (uint32_t)((a >> 45) & 0x00003fff);
    r |= (uint32_t)((a >> 48) & 0x0000c000);
    return r;
}

/* Inverse shuffle: expand the 32-bit memory form of an F-float into
   its 64-bit register image, widening the exponent field.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r = ((uint64_t)(a & 0x0000c000)) << 48
               | ((uint64_t)(a & 0x003fffff)) << 45
               | ((uint64_t)(a & 0xffff0000)) << 13;

    if (!(a & 0x00004000)) {
        r |= 0x7ULL << 59;
    }
    return r;
}
502

    
503
/* ADDF: VAX F-float add.  */
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa = f_to_float32(a);
    float32 fb = f_to_float32(b);

    return float32_to_f(float32_add(fa, fb, &FP_STATUS));
}

/* SUBF: VAX F-float subtract.  */
uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa = f_to_float32(a);
    float32 fb = f_to_float32(b);

    return float32_to_f(float32_sub(fa, fb, &FP_STATUS));
}

/* MULF: VAX F-float multiply.  */
uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa = f_to_float32(a);
    float32 fb = f_to_float32(b);

    return float32_to_f(float32_mul(fa, fb, &FP_STATUS));
}

/* DIVF: VAX F-float divide.  */
uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa = f_to_float32(a);
    float32 fb = f_to_float32(b);

    return float32_to_f(float32_div(fa, fb, &FP_STATUS));
}

/* SQRTF: VAX F-float square root.  */
uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft = f_to_float32(t);

    return float32_to_f(float32_sqrt(ft, &FP_STATUS));
}
551

    
552

    
553
/* G floating (VAX) */
554
/* Convert a host float64 to the 64-bit register image of a VAX
   G-float.  Inputs with no G representation (NaN, infinity, exponent
   overflow) are encoded as 1, a "dirty zero" reserved operand that
   traps when consumed by g_to_float64 below.  */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    /* Split the IEEE double into sign, biased exponent, fraction.  */
    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number: rebias the exponent by +2 (inverse of
               the -2 applied in g_to_float64).  */
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}
586

    
587
/* Convert the 64-bit register image of a VAX G-float back to a host
   float64.  Reserved operands (zero exponent, non-zero sign or
   fraction) raise OPCDEC; exponents below the representable range
   flush to zero.  */
static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        /* Undo the +2 exponent rebias applied by float64_to_g.  */
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}
609

    
610
/* Swap the four 16-bit words of a G-float end-for-end to produce the
   in-memory layout.  The shuffle is an involution, so the
   memory-to-register direction below is the identical operation.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;

    r  = (a << 48) & 0xffff000000000000ull;
    r |= (a << 16) & 0x0000ffff00000000ull;
    r |= (a >> 16) & 0x00000000ffff0000ull;
    r |= (a >> 48) & 0x000000000000ffffull;
    return r;
}

/* Inverse of helper_g_to_memory (same word swap).  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;

    r  = (a << 48) & 0xffff000000000000ull;
    r |= (a << 16) & 0x0000ffff00000000ull;
    r |= (a >> 16) & 0x00000000ffff0000ull;
    r |= (a >> 48) & 0x000000000000ffffull;
    return r;
}
629

    
630
/* ADDG: VAX G-float add.  */
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa = g_to_float64(a);
    float64 fb = g_to_float64(b);

    return float64_to_g(float64_add(fa, fb, &FP_STATUS));
}

/* SUBG: VAX G-float subtract.  */
uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa = g_to_float64(a);
    float64 fb = g_to_float64(b);

    return float64_to_g(float64_sub(fa, fb, &FP_STATUS));
}

/* MULG: VAX G-float multiply.  */
uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa = g_to_float64(a);
    float64 fb = g_to_float64(b);

    return float64_to_g(float64_mul(fa, fb, &FP_STATUS));
}

/* DIVG: VAX G-float divide.  */
uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa = g_to_float64(a);
    float64 fb = g_to_float64(b);

    return float64_to_g(float64_div(fa, fb, &FP_STATUS));
}

/* SQRTG: VAX G-float square root.  */
uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa = g_to_float64(a);

    return float64_to_g(float64_sqrt(fa, &FP_STATUS));
}
678

    
679

    
680
/* S floating (single) */
681
static inline uint64_t float32_to_s(float32 fa)
682
{
683
    CPU_FloatU a;
684
    uint64_t r;
685

    
686
    a.f = fa;
687

    
688
    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
689
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
690
        r |= 0x7ll << 59;
691
    return r;
692
}
693

    
694
static inline float32 s_to_float32(uint64_t a)
695
{
696
    CPU_FloatU r;
697
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
698
    return r.f;
699
}
700

    
701
/* S-float register image -> 32-bit memory form.
   The memory format is the same IEEE layout as float32.  Reinterpret
   the bits through the CPU_FloatU union used throughout this file
   rather than the previous pointer cast, which violates strict
   aliasing when float32 is a genuine floating-point type.  */
uint32_t helper_s_to_memory (uint64_t a)
{
    CPU_FloatU v;

    v.f = s_to_float32(a);
    return v.l;
}

/* 32-bit memory form -> S-float register image (same union-based
   reinterpretation, replacing a strict-aliasing pointer cast).  */
uint64_t helper_memory_to_s (uint32_t a)
{
    CPU_FloatU v;

    v.l = a;
    return float32_to_s(v.f);
}
713

    
714
uint64_t helper_adds (uint64_t a, uint64_t b)
715
{
716
    float32 fa, fb, fr;
717

    
718
    fa = s_to_float32(a);
719
    fb = s_to_float32(b);
720
    fr = float32_add(fa, fb, &FP_STATUS);
721
    return float32_to_s(fr);
722
}
723

    
724
uint64_t helper_subs (uint64_t a, uint64_t b)
725
{
726
    float32 fa, fb, fr;
727

    
728
    fa = s_to_float32(a);
729
    fb = s_to_float32(b);
730
    fr = float32_sub(fa, fb, &FP_STATUS);
731
    return float32_to_s(fr);
732
}
733

    
734
uint64_t helper_muls (uint64_t a, uint64_t b)
735
{
736
    float32 fa, fb, fr;
737

    
738
    fa = s_to_float32(a);
739
    fb = s_to_float32(b);
740
    fr = float32_mul(fa, fb, &FP_STATUS);
741
    return float32_to_s(fr);
742
}
743

    
744
uint64_t helper_divs (uint64_t a, uint64_t b)
745
{
746
    float32 fa, fb, fr;
747

    
748
    fa = s_to_float32(a);
749
    fb = s_to_float32(b);
750
    fr = float32_div(fa, fb, &FP_STATUS);
751
    return float32_to_s(fr);
752
}
753

    
754
uint64_t helper_sqrts (uint64_t a)
755
{
756
    float32 fa, fr;
757

    
758
    fa = s_to_float32(a);
759
    fr = float32_sqrt(fa, &FP_STATUS);
760
    return float32_to_s(fr);
761
}
762

    
763

    
764
/* T floating (double) */
765
static inline float64 t_to_float64(uint64_t a)
766
{
767
    /* Memory format is the same as float64 */
768
    CPU_DoubleU r;
769
    r.ll = a;
770
    return r.d;
771
}
772

    
773
static inline uint64_t float64_to_t(float64 fa)
774
{
775
    /* Memory format is the same as float64 */
776
    CPU_DoubleU r;
777
    r.d = fa;
778
    return r.ll;
779
}
780

    
781
/* ADDT: IEEE double add.  */
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fr = float64_add(t_to_float64(a), t_to_float64(b), &FP_STATUS);

    return float64_to_t(fr);
}

/* SUBT: IEEE double subtract.  */
uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fr = float64_sub(t_to_float64(a), t_to_float64(b), &FP_STATUS);

    return float64_to_t(fr);
}

/* MULT: IEEE double multiply.  */
uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fr = float64_mul(t_to_float64(a), t_to_float64(b), &FP_STATUS);

    return float64_to_t(fr);
}

/* DIVT: IEEE double divide.  */
uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fr = float64_div(t_to_float64(a), t_to_float64(b), &FP_STATUS);

    return float64_to_t(fr);
}

/* SQRTT: IEEE double square root.  */
uint64_t helper_sqrtt (uint64_t a)
{
    float64 fr = float64_sqrt(t_to_float64(a), &FP_STATUS);

    return float64_to_t(fr);
}
829

    
830

    
831
/* Sign copy */
832
/* CPYS: result takes its sign from a, everything else from b.  */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    return (b & 0x7FFFFFFFFFFFFFFFULL) | (a & 0x8000000000000000ULL);
}

/* CPYSN: as CPYS with the sign of a negated.  */
uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    return (b & 0x7FFFFFFFFFFFFFFFULL) | (~a & 0x8000000000000000ULL);
}

/* CPYSE: copy sign and exponent from a, fraction from b.  */
uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (b & 0x000FFFFFFFFFFFFFULL) | (a & 0xFFF0000000000000ULL);
}
846

    
847

    
848
/* Comparisons */
849
/* CMPTUN: unordered test — true when either T operand is a NaN.
   Comparisons return 2.0's bit pattern for true, 0 for false.  */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa = t_to_float64(a);
    float64 fb = t_to_float64(b);

    return (float64_is_nan(fa) || float64_is_nan(fb))
        ? 0x4000000000000000ULL : 0;
}

/* CMPTEQ: T-float equality.  */
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa = t_to_float64(a);
    float64 fb = t_to_float64(b);

    return float64_eq(fa, fb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}

/* CMPTLE: T-float less-than-or-equal.  */
uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa = t_to_float64(a);
    float64 fb = t_to_float64(b);

    return float64_le(fa, fb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}

/* CMPTLT: T-float less-than.  */
uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa = t_to_float64(a);
    float64 fb = t_to_float64(b);

    return float64_lt(fa, fb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}

/* CMPGEQ: G-float equality (reserved operands trap in the
   conversion).  */
uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa = g_to_float64(a);
    float64 fb = g_to_float64(b);

    return float64_eq(fa, fb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}

/* CMPGLE: G-float less-than-or-equal.  */
uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa = g_to_float64(a);
    float64 fb = g_to_float64(b);

    return float64_le(fa, fb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}

/* CMPGLT: G-float less-than.  */
uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa = g_to_float64(a);
    float64 fb = g_to_float64(b);

    return float64_lt(fa, fb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}
939

    
940
/* Sign/magnitude compares against zero used by the FP branch and
   conditional-move insns; both +0 and -0 have a zero magnitude.  */

/* True when the operand is (plus or minus) zero.  */
uint64_t helper_cmpfeq (uint64_t a)
{
    return (a & 0x7FFFFFFFFFFFFFFFULL) == 0;
}

/* Non-zero (returns the magnitude itself as the truth value).  */
uint64_t helper_cmpfne (uint64_t a)
{
    return a & 0x7FFFFFFFFFFFFFFFULL;
}

/* Strictly negative: sign set and magnitude non-zero.  */
uint64_t helper_cmpflt (uint64_t a)
{
    return (a >> 63) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

/* Negative or zero.  */
uint64_t helper_cmpfle (uint64_t a)
{
    return (a >> 63) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

/* Strictly positive: sign clear and magnitude non-zero.  */
uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a >> 63) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

/* Positive or zero.  */
uint64_t helper_cmpfge (uint64_t a)
{
    return !(a >> 63) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}
969

    
970

    
971
/* Floating point format conversion */
972
/* CVTTS: narrow a T-float (double) to an S-float (single).  */
uint64_t helper_cvtts (uint64_t a)
{
    float32 fr = float64_to_float32(t_to_float64(a), &FP_STATUS);

    return float32_to_s(fr);
}

/* CVTST: widen an S-float to a T-float.  */
uint64_t helper_cvtst (uint64_t a)
{
    float64 fr = float32_to_float64(s_to_float32(a), &FP_STATUS);

    return float64_to_t(fr);
}

/* CVTQS: quadword integer to S-float.  */
uint64_t helper_cvtqs (uint64_t a)
{
    return float32_to_s(int64_to_float32(a, &FP_STATUS));
}

/* CVTTQ: T-float to quadword integer, truncating toward zero.  */
uint64_t helper_cvttq (uint64_t a)
{
    return float64_to_int64_round_to_zero(t_to_float64(a), &FP_STATUS);
}

/* CVTQT: quadword integer to T-float.  */
uint64_t helper_cvtqt (uint64_t a)
{
    return float64_to_t(int64_to_float64(a, &FP_STATUS));
}

/* CVTQF: quadword integer to VAX F-float.  */
uint64_t helper_cvtqf (uint64_t a)
{
    return float32_to_f(int64_to_float32(a, &FP_STATUS));
}
1015

    
1016
/* CVTGF: narrow a G-float to an F-float via IEEE double/single.  */
uint64_t helper_cvtgf (uint64_t a)
{
    float32 fr = float64_to_float32(g_to_float64(a), &FP_STATUS);

    return float32_to_f(fr);
}

/* CVTGQ: G-float to quadword integer, truncating toward zero.  */
uint64_t helper_cvtgq (uint64_t a)
{
    return float64_to_int64_round_to_zero(g_to_float64(a), &FP_STATUS);
}

/* CVTQG: quadword integer to G-float.  */
uint64_t helper_cvtqg (uint64_t a)
{
    return float64_to_g(int64_to_float64(a, &FP_STATUS));
}
1038

    
1039
/* CVTLQ: gather the longword's bits from register fields <63:62> and
   <58:29> and sign-extend the resulting 32-bit value.  */
uint64_t helper_cvtlq (uint64_t a)
{
    uint32_t lo = (a >> 29) & 0x3FFFFFFF;
    uint32_t hi = a >> 32;

    return (int64_t)(int32_t)(hi | lo);
}
1043

    
1044
/* Common CVTQL body: scatter the low longword of a into register
   fields <63:62> and <58:29>.  v enables integer-overflow trapping;
   the /s (software completion) qualifier is not implemented yet.  */
static inline uint64_t __helper_cvtql(uint64_t a, int s, int v)
{
    uint64_t r = ((a & 0xC0000000ULL) << 32)
               | ((a & 0x7FFFFFFFULL) << 29);

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}

/* CVTQL: no trapping.  */
uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

/* CVTQL/V: trap on overflow.  */
uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

/* CVTQL/SV: trap on overflow with software completion.  */
uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}
1074

    
1075
/* PALcode support special instructions */
1076
#if !defined (CONFIG_USER_ONLY)
1077
void helper_hw_rei (void)
1078
{
1079
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
1080
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
1081
    /* XXX: re-enable interrupts and memory mapping */
1082
}
1083

    
1084
void helper_hw_ret (uint64_t a)
1085
{
1086
    env->pc = a & ~3;
1087
    env->ipr[IPR_EXC_ADDR] = a & 1;
1088
    /* XXX: re-enable interrupts and memory mapping */
1089
}
1090

    
1091
uint64_t helper_mfpr (int iprn, uint64_t val)
1092
{
1093
    uint64_t tmp;
1094

    
1095
    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
1096
        val = tmp;
1097

    
1098
    return val;
1099
}
1100

    
1101
void helper_mtpr (int iprn, uint64_t val)
1102
{
1103
    cpu_alpha_mtpr(env, iprn, val, NULL);
1104
}
1105

    
1106
void helper_set_alt_mode (void)
1107
{
1108
    env->saved_mode = env->ps & 0xC;
1109
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
1110
}
1111

    
1112
void helper_restore_mode (void)
1113
{
1114
    env->ps = (env->ps & ~0xC) | env->saved_mode;
1115
}
1116

    
1117
#endif
1118

    
1119
/*****************************************************************************/
1120
/* Softmmu support */
1121
#if !defined (CONFIG_USER_ONLY)
1122

    
1123
/* XXX: the two following helpers are pure hacks.
1124
 *      Hopefully, we emulate the PALcode, then we should never see
1125
 *      HW_LD / HW_ST instructions.
1126
 */
1127
/* Translate a virtual address to a physical address through the soft
   TLB (read access), filling the TLB on a miss and retrying.
   NOTE(review): on a fill failure tlb_fill longjmps out via
   cpu_loop_exit, so the redo loop presumably cannot spin forever —
   confirm against tlb_fill below.  */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    /* Direct-mapped TLB: hash the page number into the table.  */
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* Hit: the addend maps the virtual page to its physical
           counterpart.  */
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

/* As above, for a write access (checks addr_write and fills with
   is_write = 1).  */
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
1170

    
1171
/* Longword load from raw (untranslated) memory.
   NOTE(review): the ld*_raw macro is invoked as ldl_raw(t1, t0) with
   t1 apparently the address — confirm the macro signature in exec.h;
   as written the loaded value does not appear to be stored anywhere
   visible in this file.  */
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

/* Quadword load from raw memory (same signature caveat as above).  */
void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

/* Load-locked longword: record the locked address, then load.  */
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}
1186

    
1187
/* Load-locked quadword: record the locked address, then load.
   Bug fix: this helper performed a longword load (ldl_raw) even
   though it implements the quadword variant — inconsistent with
   helper_ldq_raw above.  Use the quadword accessor.  */
void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}
1192

    
1193
/* Longword load using kernel-mode address translation.  */
void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

/* Quadword load using kernel-mode address translation.  */
void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

/* Longword load using the current data translation mode.  */
void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

/* Quadword load using the current data translation mode.  */
void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}
1212

    
1213
/* Store longword t0 to raw (untranslated) address t1.  */
void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

/* Store quadword t0 to raw address t1.  */
void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}
1222

    
1223
/* Store-conditional longword: succeeds (returns 0) only when the
   lock address recorded by the matching load-locked still equals the
   store address.  Either way the lock is reset to 1, a value no
   load-locked records as an address here.  */
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t failed = 1;

    if (env->lock == t1) {
        stl_raw(t1, t0);
        failed = 0;
    }

    env->lock = 1;

    return failed;
}

/* Store-conditional quadword (see helper_stl_c_raw).  */
uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t failed = 1;

    if (env->lock == t1) {
        stq_raw(t1, t0);
        failed = 0;
    }

    env->lock = 1;

    return failed;
}
1252

    
1253
#define MMUSUFFIX _mmu
1254

    
1255
#define SHIFT 0
1256
#include "softmmu_template.h"
1257

    
1258
#define SHIFT 1
1259
#include "softmmu_template.h"
1260

    
1261
#define SHIFT 2
1262
#include "softmmu_template.h"
1263

    
1264
#define SHIFT 3
1265
#include "softmmu_template.h"
1266

    
1267
/* try to fill the TLB and return an exception if error. If retaddr is
1268
   NULL, it means that the function was called in C code (i.e. not
1269
   from generated code or from helper.c) */
1270
/* XXX: fix it to restore all registers */
1271
/* Fill the soft TLB for ADDR or raise the MMU fault recorded by
   cpu_alpha_handle_mmu_fault.  retaddr, when non-NULL, is the host
   return address inside generated code and is used to recover the
   guest CPU state at the faulting instruction.  */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        /* NOTE(review): cpu_loop_exit presumably longjmps away, so env
           is deliberately not restored on this path — confirm.  */
        cpu_loop_exit();
    }
    env = saved_env;
}
1299

    
1300
#endif