Statistics
| Branch: | Revision:

root / target-alpha / op_helper.c @ ffec44f1

History | View | Annotate | Download (25.8 kB)

1
/*
2
 *  Alpha emulation cpu micro-operations helpers for qemu.
3
 *
4
 *  Copyright (c) 2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19

    
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "softfloat.h"
23
#include "helper.h"
24

    
25
/*****************************************************************************/
26
/* Exceptions processing helpers */
27
/* Raise an Alpha exception: record the exception index and error code
   in the CPU state, then longjmp back to the main execution loop.
   Does not return.  */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
33

    
34
/* Read the processor cycle counter (PCC).  Not implemented: always
   returns 0 for now.  */
uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}

/* Return the guest-visible FPCR assembled from the CPU state.  */
uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

/* Write the guest FPCR (updates softfloat rounding/trap state).  */
void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}
49

    
50
/* Protects env->intr_flag against concurrent RS/RC from other vCPUs.  */
static spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

/* RS: atomically read the per-CPU interrupt flag and set it.
   Returns the previous value of the flag.  */
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

/* RC: atomically read the per-CPU interrupt flag and clear it.
   Returns the previous value of the flag.  */
uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
75

    
76
/* ADDQ/V: 64-bit add, trapping on signed overflow.  Overflow occurred
   iff the operands have the same sign and the result's sign differs;
   the xor expression computes exactly that in bit 63.  */
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

/* ADDL/V: 32-bit add, trapping on signed overflow in bit 31.
   NOTE(review): the result is returned zero-extended rather than
   sign-extended — confirm the translator canonicalizes it.  */
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

/* SUBQ/V: 64-bit subtract, trapping on signed overflow (operand signs
   differ and the result sign differs from the minuend's).  */
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

/* SUBL/V: 32-bit subtract, trapping on signed overflow in bit 31.
   Result returned zero-extended (same caveat as helper_addlv above).  */
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

/* MULL/V: 32-bit multiply, trapping when the product does not fit in
   32 signed bits.  Assumes op1/op2 arrive as sign-extended longwords,
   so the signed 64-bit product itself cannot overflow — TODO confirm
   at the call sites.  */
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}
125

    
126
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
127
{
128
    uint64_t tl, th;
129

    
130
    muls64(&tl, &th, op1, op2);
131
    /* If th != 0 && th != -1, then we had an overflow */
132
    if (unlikely((th + 1) > 1)) {
133
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
134
    }
135
    return tl;
136
}
137

    
138
/* UMULH: return the high 64 bits of the unsigned 128-bit product.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

/* CTPOP: population count (number of set bits).  */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

/* CTLZ: count leading zero bits.  */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

/* CTTZ: count trailing zero bits.  */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
160

    
161
/* Build a byte mask from MSKB (one mask bit per byte of OP) and clear
   every byte of OP whose mask bit is set.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if (mskb & (1 << i)) {
            mask |= 0xffull << (i * 8);
        }
    }
    return op & ~mask;
}

/* ZAP: clear the bytes of VAL selected by the low 8 bits of MASK.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: keep only the bytes of VAL selected by the low 8 bits of
   MASK; clear all the others.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
187

    
188
/* INSWH/INSLH/INSQH: insert {word,long,quad} high.  Conceptually shift
   VAL left by the byte offset (mask & 7) across a 128-bit span and
   return the bytes that spill into the upper quadword, masked to the
   datum width.  A zero byte offset spills nothing; it must be special-
   cased because the old code then computed "val >>= 64", which is
   undefined behavior in C (shift count equals the type width).  */
uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    if (byte_ofs == 0)
        return 0;
    val >>= 64 - (byte_ofs * 8);
    return byte_zap(val, ~((0x03 << byte_ofs) >> 8));
}

uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    if (byte_ofs == 0)
        return 0;
    val >>= 64 - (byte_ofs * 8);
    return byte_zap(val, ~((0x0F << byte_ofs) >> 8));
}

uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    if (byte_ofs == 0)
        return 0;
    val >>= 64 - (byte_ofs * 8);
    return byte_zap(val, ~((0xFF << byte_ofs) >> 8));
}
205

    
206
/* CMPBGE: compare each byte of op1 against the corresponding byte of
   op2 (unsigned) and return an 8-bit vector with bit i set when byte i
   of op1 is >= byte i of op2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t ba = op1 >> shift;
        uint8_t bb = op2 >> shift;
        if (ba >= bb) {
            res |= 1ULL << (shift / 8);
        }
    }
    return res;
}
220

    
221
/* Per-lane minimum/maximum helpers for the MVI multimedia extension.
   Each routine splits both operands into independent 8-bit or 16-bit
   lanes, takes the per-lane min or max (unsigned or signed as the name
   says), and reassembles the lanes into the 64-bit result.  */

uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;
        res |= (uint64_t)(a < b ? a : b) << shift;
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t a = op1 >> shift;
        int8_t b = op2 >> shift;
        res |= (uint64_t)(uint8_t)(a < b ? a : b) << shift;
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t a = op1 >> shift;
        uint16_t b = op2 >> shift;
        res |= (uint64_t)(a < b ? a : b) << shift;
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t a = op1 >> shift;
        int16_t b = op2 >> shift;
        res |= (uint64_t)(uint16_t)(a < b ? a : b) << shift;
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;
        res |= (uint64_t)(a > b ? a : b) << shift;
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t a = op1 >> shift;
        int8_t b = op2 >> shift;
        res |= (uint64_t)(uint8_t)(a > b ? a : b) << shift;
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t a = op1 >> shift;
        uint16_t b = op2 >> shift;
        res |= (uint64_t)(a > b ? a : b) << shift;
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t a = op1 >> shift;
        int16_t b = op2 >> shift;
        res |= (uint64_t)(uint16_t)(a > b ? a : b) << shift;
    }
    return res;
}
344

    
345
/* PERR: sum of absolute differences of the eight byte lanes.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t sum = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;
        sum += a > b ? a - b : b - a;
    }
    return sum;
}

/* PKLB: pack two longwords to bytes — byte 0 and byte 4 of the source
   become bytes 0 and 1 of the result.  */
uint64_t helper_pklb (uint64_t op1)
{
    uint64_t b0 = op1 & 0xff;
    uint64_t b4 = (op1 >> 32) & 0xff;

    return b0 | (b4 << 8);
}

/* PKWB: pack four words to bytes — the low byte of each 16-bit word
   moves to byte position i.  */
uint64_t helper_pkwb (uint64_t op1)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((op1 >> (i * 16)) & 0xff) << (i * 8);
    }
    return r;
}

/* UNPKBL: unpack bytes to longwords — bytes 0 and 1 spread to bytes
   0 and 4 of the result.  */
uint64_t helper_unpkbl (uint64_t op1)
{
    uint64_t lo = op1 & 0xff;
    uint64_t hi = (op1 >> 8) & 0xff;

    return lo | (hi << 32);
}

/* UNPKBW: unpack bytes to words — byte i spreads to the low byte of
   16-bit word i.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((op1 >> (i * 8)) & 0xff) << (i * 16);
    }
    return r;
}
388

    
389
/* Floating point helpers */
390

    
391
/* F floating (VAX) */
392
static inline uint64_t float32_to_f(float32 fa)
393
{
394
    uint64_t r, exp, mant, sig;
395
    CPU_FloatU a;
396

    
397
    a.f = fa;
398
    sig = ((uint64_t)a.l & 0x80000000) << 32;
399
    exp = (a.l >> 23) & 0xff;
400
    mant = ((uint64_t)a.l & 0x007fffff) << 29;
401

    
402
    if (exp == 255) {
403
        /* NaN or infinity */
404
        r = 1; /* VAX dirty zero */
405
    } else if (exp == 0) {
406
        if (mant == 0) {
407
            /* Zero */
408
            r = 0;
409
        } else {
410
            /* Denormalized */
411
            r = sig | ((exp + 1) << 52) | mant;
412
        }
413
    } else {
414
        if (exp >= 253) {
415
            /* Overflow */
416
            r = 1; /* VAX dirty zero */
417
        } else {
418
            r = sig | ((exp + 2) << 52);
419
        }
420
    }
421

    
422
    return r;
423
}
424

    
425
static inline float32 f_to_float32(uint64_t a)
426
{
427
    uint32_t exp, mant_sig;
428
    CPU_FloatU r;
429

    
430
    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
431
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
432

    
433
    if (unlikely(!exp && mant_sig)) {
434
        /* Reserved operands / Dirty zero */
435
        helper_excp(EXCP_OPCDEC, 0);
436
    }
437

    
438
    if (exp < 3) {
439
        /* Underflow */
440
        r.l = 0;
441
    } else {
442
        r.l = ((exp - 2) << 23) | mant_sig;
443
    }
444

    
445
    return r.f;
446
}
447

    
448
/* F-format store shuffle: collapse the 64-bit FP-register layout into
   the 32-bit in-memory representation (the two 16-bit halves are
   swapped relative to the register layout).  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t lo = (a & 0x00001fffe0000000ull) >> 13;
    uint32_t mid = (a & 0x07ffe00000000000ull) >> 45;
    uint32_t top = (a & 0xc000000000000000ull) >> 48;

    return lo | mid | top;
}

/* F-format load shuffle: inverse of helper_f_to_memory, additionally
   widening the exponent field (the 0x7ll << 59 fill) for finite
   numbers whose exponent MSB is clear.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;

    r = ((uint64_t)(a & 0x0000c000)) << 48
        | ((uint64_t)(a & 0x003fffff)) << 45
        | ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000)) {
        r |= 0x7ll << 59;
    }
    return r;
}
467

    
468
/* ADDF: VAX F-format add.  Operands arrive in FP-register encoding;
   convert to host float32, operate with the softfloat state in
   FP_STATUS, and re-encode.  */
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* SUBF: VAX F-format subtract.  */
uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* MULF: VAX F-format multiply.  */
uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* DIVF: VAX F-format divide.  */
uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* SQRTF: VAX F-format square root.  */
uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}
516

    
517

    
518
/* G floating (VAX) */
519
static inline uint64_t float64_to_g(float64 fa)
520
{
521
    uint64_t r, exp, mant, sig;
522
    CPU_DoubleU a;
523

    
524
    a.d = fa;
525
    sig = a.ll & 0x8000000000000000ull;
526
    exp = (a.ll >> 52) & 0x7ff;
527
    mant = a.ll & 0x000fffffffffffffull;
528

    
529
    if (exp == 2047) {
530
        /* NaN or infinity */
531
        r = 1; /* VAX dirty zero */
532
    } else if (exp == 0) {
533
        if (mant == 0) {
534
            /* Zero */
535
            r = 0;
536
        } else {
537
            /* Denormalized */
538
            r = sig | ((exp + 1) << 52) | mant;
539
        }
540
    } else {
541
        if (exp >= 2045) {
542
            /* Overflow */
543
            r = 1; /* VAX dirty zero */
544
        } else {
545
            r = sig | ((exp + 2) << 52);
546
        }
547
    }
548

    
549
    return r;
550
}
551

    
552
static inline float64 g_to_float64(uint64_t a)
553
{
554
    uint64_t exp, mant_sig;
555
    CPU_DoubleU r;
556

    
557
    exp = (a >> 52) & 0x7ff;
558
    mant_sig = a & 0x800fffffffffffffull;
559

    
560
    if (!exp && mant_sig) {
561
        /* Reserved operands / Dirty zero */
562
        helper_excp(EXCP_OPCDEC, 0);
563
    }
564

    
565
    if (exp < 3) {
566
        /* Underflow */
567
        r.ll = 0;
568
    } else {
569
        r.ll = ((exp - 2) << 52) | mant_sig;
570
    }
571

    
572
    return r.d;
573
}
574

    
575
/* G-format load/store shuffle: the in-register and in-memory layouts
   differ by reversing the order of the four 16-bit words.  The
   transform is an involution, so the load and store directions use
   the same word swap.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((a >> (i * 16)) & 0xffffull) << ((3 - i) * 16);
    }
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((a >> (i * 16)) & 0xffffull) << ((3 - i) * 16);
    }
    return r;
}
594

    
595
/* ADDG: VAX G-format add via host float64, using FP_STATUS.  */
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* SUBG: VAX G-format subtract.  */
uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* MULG: VAX G-format multiply.  */
uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* DIVG: VAX G-format divide.  */
uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* SQRTG: VAX G-format square root.  */
uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}
643

    
644

    
645
/* S floating (single) */
646
/* Spread a host float32 across the 64-bit FP-register layout used for
   S-format: sign and exponent MSB at <63:62>, the remaining 30 bits
   left-aligned at bit 29.  For finite numbers whose exponent MSB is
   clear, the exponent is widened by the 0x7ll << 59 fill.  */
static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;

    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
        r |= 0x7ll << 59;
    return r;
}

/* Inverse of float32_to_s: collapse the register layout back into the
   32 bits of a host float32 (the exponent-fill bits are discarded).  */
static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}
665

    
666
uint32_t helper_s_to_memory (uint64_t a)
667
{
668
    /* Memory format is the same as float32 */
669
    float32 fa = s_to_float32(a);
670
    return *(uint32_t*)(&fa);
671
}
672

    
673
uint64_t helper_memory_to_s (uint32_t a)
674
{
675
    /* Memory format is the same as float32 */
676
    return float32_to_s(*(float32*)(&a));
677
}
678

    
679
/* ADDS: IEEE single add on S-format register values, with rounding
   and exception accumulation through FP_STATUS.  */
uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* SUBS: IEEE single subtract.  */
uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* MULS: IEEE single multiply.  */
uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* DIVS: IEEE single divide.  */
uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* SQRTS: IEEE single square root.  */
uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}
727

    
728

    
729
/* T floating (double) */
730
static inline float64 t_to_float64(uint64_t a)
731
{
732
    /* Memory format is the same as float64 */
733
    CPU_DoubleU r;
734
    r.ll = a;
735
    return r.d;
736
}
737

    
738
static inline uint64_t float64_to_t(float64 fa)
739
{
740
    /* Memory format is the same as float64 */
741
    CPU_DoubleU r;
742
    r.d = fa;
743
    return r.ll;
744
}
745

    
746
/* ADDT: IEEE double add; T-format is a host float64 bit-for-bit, with
   rounding and exceptions through FP_STATUS.  */
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* SUBT: IEEE double subtract.  */
uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* MULT: IEEE double multiply.  */
uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* DIVT: IEEE double divide.  */
uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* SQRTT: IEEE double square root.  */
uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}
794

    
795

    
796
/* Sign copy */
797
/* CPYS: result takes the sign bit of a and the magnitude (exponent and
   fraction, bits <62:0>) of b.  */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    uint64_t sign = a >> 63;

    return (sign << 63) | (b << 1 >> 1);
}

/* CPYSN: like CPYS but with the sign bit of a negated.  */
uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    uint64_t sign = (a >> 63) ^ 1;

    return (sign << 63) | (b << 1 >> 1);
}

/* CPYSE: result takes the sign and 11-bit exponent (top 12 bits) of a
   and the 52-bit fraction of b.  */
uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (a >> 52 << 52) | (b << 12 >> 12);
}
811

    
812

    
813
/* Comparisons */
814
/* CMPTUN: IEEE unordered test on T-format operands.  Returns the Alpha
   FP "true" encoding (0x4000000000000000) when either operand is NaN.
   NOTE(review): float64_is_nan does not distinguish quiet from
   signaling NaNs here — confirm trap behavior is handled elsewhere.  */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTEQ: IEEE equality on T-format operands.  */
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLE: IEEE a <= b on T-format operands.  */
uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLT: IEEE a < b on T-format operands.  */
uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGEQ: equality on VAX G-format operands.  */
uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLE: a <= b on VAX G-format operands.  */
uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLT: a < b on VAX G-format operands.  */
uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
904

    
905
/* Sign/magnitude predicates on the raw register bit pattern: bit 63 is
   the sign, bits <62:0> the magnitude, and both +0 and -0 count as
   zero.  All return 0/1 except cmpfne, which (like the original code)
   yields the raw magnitude bits as its truth value.  */
uint64_t helper_cmpfeq (uint64_t a)
{
    return (a << 1) == 0;
}

uint64_t helper_cmpfne (uint64_t a)
{
    return a << 1 >> 1;
}

uint64_t helper_cmpflt (uint64_t a)
{
    return (a >> 63) && (a << 1) != 0;
}

uint64_t helper_cmpfle (uint64_t a)
{
    return (a >> 63) || (a << 1) == 0;
}

uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a >> 63) && (a << 1) != 0;
}

uint64_t helper_cmpfge (uint64_t a)
{
    return !(a >> 63) || (a << 1) == 0;
}
934

    
935

    
936
/* Floating point format conversion */
937
/* CVTTS: T (double) -> S (single), rounding per FP_STATUS.  */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

/* CVTST: S (single) -> T (double); always exact.  */
uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQS: signed quadword -> S.  */
uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* CVTTQ: T -> signed quadword, truncating (round toward zero).
   NOTE(review): integer overflow does not raise an arithmetic trap
   here — confirm that is acceptable for this front end.  */
uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

/* CVTQT: signed quadword -> T.  */
uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQF: signed quadword -> VAX F.  */
uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGF: VAX G -> VAX F, via host double -> single narrowing.  */
uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGQ: VAX G -> signed quadword, truncating.  */
uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

/* CVTQG: signed quadword -> VAX G.  */
uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}
1003

    
1004
/* CVTLQ: convert a longword held in FP-register layout (bits <31:30>
   of the value at <63:62>, bits <30:0> at <59:29>) back to a
   sign-extended 64-bit integer.  The previous code did not mask the
   high term, so bits <61:32> of the register leaked into the result
   whenever they were non-zero.  */
uint64_t helper_cvtlq (uint64_t a)
{
    uint32_t v = ((a >> 32) & 0xC0000000) | ((a >> 29) & 0x3FFFFFFF);

    return (int64_t)(int32_t)v;
}
1008

    
1009
/* Shared body of CVTQL and its trapping variants: place the low 32
   bits of a quadword into the FP-register longword layout (bits
   <31:30> to <63:62>, bits <30:0> to <59:29>).
   v: raise an arithmetic overflow trap when the value does not fit.
   NOTE(review): the check tests the converted value r rather than the
   source operand a — verify this matches the intended semantics.
   s (software completion) is not implemented yet.  */
static inline uint64_t __helper_cvtql(uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}

/* CVTQL: no traps.  */
uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

/* CVTQL/V: trap on integer overflow.  */
uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

/* CVTQL/SV: overflow trap with software completion.  */
uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}
1039

    
1040
/* PALcode support special instructions */
1041
#if !defined (CONFIG_USER_ONLY)
1042
/* HW_REI: return from PALcode — resume at the saved exception address.
   Bit 0 of EXC_ADDR is kept behind (presumably the PAL-mode flag —
   TODO confirm).  */
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

/* HW_RET: return to the address in a register; its low bit is stored
   into EXC_ADDR (same caveat as helper_hw_rei).  */
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

/* HW_MFPR: read internal processor register iprn.  On failure the
   incoming val is returned unchanged.  */
uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

/* HW_MTPR: write internal processor register iprn.  */
void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

/* Switch the current-mode bits of PS to the alternate mode from
   IPR_ALT_MODE, saving the old mode for helper_restore_mode.  */
void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

/* Undo helper_set_alt_mode: restore the saved mode bits in PS.  */
void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}
1081

    
1082
#endif
1083

    
1084
/*****************************************************************************/
1085
/* Softmmu support */
1086
#if !defined (CONFIG_USER_ONLY)
1087

    
1088
/* XXX: the two following helpers are pure hacks.
1089
 *      Hopefully, we emulate the PALcode, then we should never see
1090
 *      HW_LD / HW_ST instructions.
1091
 */
1092
/* Translate a guest virtual address for a load by probing the softmmu
   TLB directly, filling it on a miss and retrying.
   NOTE(review): the .addend field yields a host address, not a guest
   physical one — this helper is a self-described hack for HW_LD.  */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB hit: apply the cached offset.  */
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

/* Same as helper_ld_virt_to_phys, but probes the write entry and fills
   the TLB with is_write = 1.  Used for HW_ST.  */
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB hit: apply the cached offset.  */
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
1135

    
1136
/* Raw 32-bit load for HW_LD.
   NOTE(review): the ld*_raw accessors normally take a single address
   operand and return the value; the two-operand form used throughout
   these helpers looks suspicious — verify against the memory-access
   macros this file is built with.  */
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

/* Raw 64-bit load for HW_LD (same caveat as helper_ldl_raw).  */
void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

/* Load-locked 32-bit: record the locked address in env->lock, then
   perform the load.  */
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}
1151

    
1152
void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
1153
{
1154
    env->lock = t1;
1155
    ldl_raw(t1, t0);
1156
}
1157

    
1158
/* Kernel-mode 32-bit load (forced kernel MMU index).  */
void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

/* Kernel-mode 64-bit load.  */
void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

/* 32-bit load through the current data MMU mode.  */
void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

/* 64-bit load through the current data MMU mode.  */
void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

/* Raw 32-bit store: store t0 at address t1.  */
void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

/* Raw 64-bit store: store t0 at address t1.  */
void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}
1187

    
1188
/* STL_C: store-conditional 32-bit.  The store of t0 at t1 is performed
   only when t1 matches the address recorded by the last load-locked;
   returns 0 on success, 1 on failure, and invalidates the lock either
   way (env->lock = 1, presumably an address no aligned access can
   match — TODO confirm).
   NOTE(review): the Alpha ISA defines the in-register success value as
   1, not 0 — confirm the translator inverts this result.  */
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

/* STQ_C: store-conditional 64-bit; same protocol as helper_stl_c_raw.  */
uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}
1217

    
1218
#define MMUSUFFIX _mmu
1219

    
1220
#define SHIFT 0
1221
#include "softmmu_template.h"
1222

    
1223
#define SHIFT 1
1224
#include "softmmu_template.h"
1225

    
1226
#define SHIFT 2
1227
#include "softmmu_template.h"
1228

    
1229
#define SHIFT 3
1230
#include "softmmu_template.h"
1231

    
1232
/* try to fill the TLB and return an exception if error. If retaddr is
1233
   NULL, it means that the function was called in C code (i.e. not
1234
   from generated code or from helper.c) */
1235
/* XXX: fix it to restore all registers */
1236
/* Handle a softmmu TLB miss: ask the Alpha MMU to resolve ADDR; on a
   real guest fault, restore the CPU state from the faulting TB (when
   called from generated code) and deliver the exception.  */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
1264

    
1265
#endif