Statistics
| Branch: | Revision:

root / target-alpha / op_helper.c @ 14ab1634

History | View | Annotate | Download (26.1 kB)

1
/*
2
 *  Alpha emulation cpu micro-operations helpers for qemu.
3
 *
4
 *  Copyright (c) 2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19

    
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "softfloat.h"
23
#include "helper.h"
24

    
25
/*****************************************************************************/
26
/* Exceptions processing helpers */
27
/* Raise a CPU exception: record the exception index and auxiliary error
   code in the CPU state, then longjmp back to the main execution loop.
   Does not return. */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
33

    
34
/* RPCC: read the processor cycle counter.  Not implemented yet — always
   returns 0 (XXX: TODO). */
uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}
39

    
40
/* Read the floating-point control register from the CPU state. */
uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}
44

    
45
/* Write the floating-point control register into the CPU state. */
void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}
49

    
50
static spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;
51

    
52
/* RS: atomically read the per-CPU interrupt flag and set it to 1,
   returning the previous value.  The spinlock serializes against other
   emulated CPUs touching intr_flag. */
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
63

    
64
/* RC: atomically read the per-CPU interrupt flag and clear it,
   returning the previous value (companion of helper_rs). */
uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
75

    
76
/* ADDQ/V: quadword add, trapping on signed overflow.  Overflow occurred
   iff both operands share a sign and the sum's sign differs from it. */
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t sum = op1 + op2;

    if (unlikely(~(op1 ^ op2) & (op1 ^ sum) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return sum;
}
85

    
86
/* ADDL/V: longword add, trapping on 32-bit signed overflow.  The result
   is truncated to 32 bits (zero-extended into the return value). */
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t sum = (uint32_t)(op1 + op2);

    if (unlikely(~(op1 ^ op2) & (op1 ^ sum) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return sum;
}
95

    
96
/* SUBQ/V: quadword subtract, trapping on signed overflow.  Overflow
   occurred iff the operand signs differ and the result's sign differs
   from op1's. */
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t diff = op1 - op2;

    if (unlikely((op1 ^ op2) & (op1 ^ diff) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return diff;
}
105

    
106
/* SUBL/V: longword subtract, trapping on 32-bit signed overflow.  The
   32-bit result is zero-extended into the return value. */
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t diff = op1 - op2;

    if (unlikely((op1 ^ op2) & (op1 ^ diff) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return diff;
}
115

    
116
/* MULL/V: longword multiply, trapping when the product does not fit in
   32 signed bits.  Returns the sign-extended 32-bit product. */
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t prod = (int64_t)op1 * (int64_t)op2;

    if (unlikely(prod != (int32_t)prod)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (uint64_t)(int64_t)(int32_t)prod;
}
125

    
126
/* MULQ/V: quadword multiply, trapping on signed overflow. */
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* The product fits in 64 signed bits only when the high half is the
       sign extension of the low half: th == 0 with tl >= 0, or th == -1
       with tl < 0.  The old test "(th + 1) > 1" accepted any th in
       {0, -1} regardless of tl, missing e.g. 2^62 * 2 (th == 0 but the
       low half is negative). */
    if (unlikely(th + ((int64_t)tl < 0) != 0)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}
137

    
138
/* UMULH: return the high 64 bits of the unsigned 128-bit product. */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}
145

    
146
/* CTPOP: population count (number of set bits). */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}
150

    
151
/* CTLZ: count leading zero bits. */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}
155

    
156
/* CTTZ: count trailing zero bits. */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
160

    
161
/* Clear (zap) each byte of 'op' whose corresponding bit is set in the
   8-bit selector 'mskb'; bit i controls byte i (LSB = byte 0). */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if (mskb & (1 << i)) {
            mask |= 0xffull << (i * 8);
        }
    }

    return op & ~mask;
}
177

    
178
/* ZAP: clear the bytes of 'val' selected by the low 8 bits of 'mask'. */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}
182

    
183
/* ZAPNOT: keep the bytes of 'val' selected by the low 8 bits of 'mask',
   clearing the rest. */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
187

    
188
/* MSKWH: clear the bytes of a word that cross into the next quadword at
   the byte offset in 'mask' (high-half companion of MSKW).  For offset
   n, (0x03 << n) >> 8 selects the word bytes that spilled past byte 7. */
uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}
192

    
193
/* INSWH: produce the bytes of a word that spill into the next quadword
   when shifted left by the byte offset in 'mask'. */
uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    /* With a zero byte offset nothing crosses into the high half; the
       byte_zap selector would be 0xff (zap everything) anyway.  Return 0
       directly instead of performing the old "val >>= 64", which is an
       undefined right shift by the full operand width. */
    if (byte_ofs == 0) {
        return 0;
    }
    val >>= 64 - (byte_ofs * 8);
    return byte_zap(val, ~((0x03 << byte_ofs) >> 8));
}
198

    
199
/* MSKLH: clear the bytes of a longword that cross into the next
   quadword at the byte offset in 'mask' (high-half companion of MSKL). */
uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}
203

    
204
/* INSLH: produce the bytes of a longword that spill into the next
   quadword when shifted left by the byte offset in 'mask'. */
uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    /* Zero offset: nothing crosses the quadword boundary.  Returning 0
       matches the byte_zap result and avoids the old undefined shift by
       64 ("val >>= 64 - 0"). */
    if (byte_ofs == 0) {
        return 0;
    }
    val >>= 64 - (byte_ofs * 8);
    return byte_zap(val, ~((0x0F << byte_ofs) >> 8));
}
209

    
210
/* MSKQH: clear the bytes of a quadword that cross into the next
   quadword at the byte offset in 'mask' (high-half companion of MSKQ). */
uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}
214

    
215
/* INSQH: produce the bytes of a quadword that spill into the next
   quadword when shifted left by the byte offset in 'mask'. */
uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    int byte_ofs = mask & 7;

    /* Zero offset: nothing crosses the boundary; return 0 directly
       rather than performing the old undefined shift by 64. */
    if (byte_ofs == 0) {
        return 0;
    }
    val >>= 64 - (byte_ofs * 8);
    return byte_zap(val, ~((0xFF << byte_ofs) >> 8));
}
220

    
221
/* CMPBGE: compare the eight byte lanes of op1 and op2; set result bit i
   when unsigned byte i of op1 is >= the corresponding byte of op2. */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 7; i >= 0; i--) {
        res <<= 1;
        if ((uint8_t)(op1 >> (i * 8)) >= (uint8_t)(op2 >> (i * 8))) {
            res |= 1;
        }
    }
    return res;
}
235

    
236
/* MINUB8: byte-wise unsigned minimum of the two quadwords. */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t va = op1 >> shift;
        uint8_t vb = op2 >> shift;
        res |= (uint64_t)(va < vb ? va : vb) << shift;
    }
    return res;
}
250

    
251
/* MINSB8: byte-wise signed minimum of the two quadwords. */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t va = op1 >> shift;
        int8_t vb = op2 >> shift;
        uint8_t m = va < vb ? va : vb;
        res |= (uint64_t)m << shift;
    }
    return res;
}
266

    
267
/* MINUW4: word-wise unsigned minimum of the two quadwords. */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t va = op1 >> shift;
        uint16_t vb = op2 >> shift;
        res |= (uint64_t)(va < vb ? va : vb) << shift;
    }
    return res;
}
281

    
282
/* MINSW4: word-wise signed minimum of the two quadwords. */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t va = op1 >> shift;
        int16_t vb = op2 >> shift;
        uint16_t m = va < vb ? va : vb;
        res |= (uint64_t)m << shift;
    }
    return res;
}
297

    
298
/* MAXUB8: byte-wise unsigned maximum of the two quadwords. */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t va = op1 >> shift;
        uint8_t vb = op2 >> shift;
        res |= (uint64_t)(va > vb ? va : vb) << shift;
    }
    return res;
}
312

    
313
/* MAXSB8: byte-wise signed maximum of the two quadwords. */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t va = op1 >> shift;
        int8_t vb = op2 >> shift;
        uint8_t m = va > vb ? va : vb;
        res |= (uint64_t)m << shift;
    }
    return res;
}
328

    
329
/* MAXUW4: word-wise unsigned maximum of the two quadwords. */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t va = op1 >> shift;
        uint16_t vb = op2 >> shift;
        res |= (uint64_t)(va > vb ? va : vb) << shift;
    }
    return res;
}
343

    
344
/* MAXSW4: word-wise signed maximum of the two quadwords. */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t va = op1 >> shift;
        int16_t vb = op2 >> shift;
        uint16_t m = va > vb ? va : vb;
        res |= (uint64_t)m << shift;
    }
    return res;
}
359

    
360
/* PERR: sum of absolute differences of the eight unsigned byte lanes. */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t sum = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t va = op1 >> shift;
        uint8_t vb = op2 >> shift;
        sum += va > vb ? va - vb : vb - va;
    }
    return sum;
}
377

    
378
/* PKLB: pack the low byte of each of the two longwords into the low
   two bytes of the result. */
uint64_t helper_pklb (uint64_t op1)
{
    uint64_t b0 = op1 & 0xff;
    uint64_t b1 = (op1 >> 32) & 0xff;

    return b0 | (b1 << 8);
}
382

    
383
/* PKWB: pack the low byte of each of the four words into the low four
   bytes of the result. */
uint64_t helper_pkwb (uint64_t op1)
{
    uint64_t res = 0;
    int i;

    for (i = 0; i < 4; i++) {
        res |= ((op1 >> (i * 16)) & 0xff) << (i * 8);
    }
    return res;
}
390

    
391
/* UNPKBL: spread the low two bytes into the low byte of each of two
   longwords (inverse of PKLB). */
uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | (((op1 & 0xff00) >> 8) << 32);
}
395

    
396
/* UNPKBW: spread the low four bytes into the low byte of each of four
   words (inverse of PKWB). */
uint64_t helper_unpkbw (uint64_t op1)
{
    uint64_t res = 0;
    int i;

    for (i = 0; i < 4; i++) {
        res |= ((op1 >> (i * 8)) & 0xff) << (i * 16);
    }
    return res;
}
403

    
404
/* Floating point helpers */
405

    
406
/* F floating (VAX) */
407
/* Convert an IEEE single-precision value to the VAX F-floating register
   representation (sign <63>, 11-bit exponent <62:52>, fraction <51:29>).
   NaN/infinity and overflow map to a VAX "dirty zero". */
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;   /* sign -> bit 63 */
    exp = (a.l >> 23) & 0xff;                   /* 8-bit IEEE exponent */
    mant = ((uint64_t)a.l & 0x007fffff) << 29;  /* fraction -> <51:29> */

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number: re-bias the exponent AND keep the fraction.
               The old code omitted "| mant", collapsing every normal
               value to a power of two. */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
439

    
440
/* Convert a VAX F-floating register value back to an IEEE single bit
   pattern.  Raises OPCDEC on reserved operands / dirty zeros (exponent
   zero with non-zero sign/fraction); exponents too small to re-bias
   underflow to zero. */
static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        /* Undo the +2 bias applied by float32_to_f. */
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
462

    
463
/* F-floating register-to-memory: word-swap into VAX memory order and
   narrow the exponent field. */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t frac_lo = (a & 0x00001fffe0000000ull) >> 13;
    uint32_t exp_hi  = (a & 0x07ffe00000000000ull) >> 45;
    uint32_t sign    = (a & 0xc000000000000000ull) >> 48;

    return frac_lo | exp_hi | sign;
}
471

    
472
/* F-floating memory-to-register: word-swap back and widen the 8-bit VAX
   exponent into the 11-bit register field. */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r = ((uint64_t)(a & 0xffff0000)) << 13;   /* fraction low */

    r |= ((uint64_t)(a & 0x003fffff)) << 45;           /* exp LSBs + fraction high */
    r |= ((uint64_t)(a & 0x0000c000)) << 48;           /* sign + exponent MSB */
    if (!(a & 0x00004000)) {
        r |= 0x7ll << 59;                              /* sign-extend the exponent */
    }
    return r;
}
482

    
483
/* ADDF: VAX F-floating add via softfloat, honouring FP_STATUS. */
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    return float32_to_f(float32_add(f_to_float32(a), f_to_float32(b),
                                    &FP_STATUS));
}
492

    
493
/* SUBF: VAX F-floating subtract via softfloat, honouring FP_STATUS. */
uint64_t helper_subf (uint64_t a, uint64_t b)
{
    return float32_to_f(float32_sub(f_to_float32(a), f_to_float32(b),
                                    &FP_STATUS));
}
502

    
503
/* MULF: VAX F-floating multiply via softfloat, honouring FP_STATUS. */
uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    return float32_to_f(float32_mul(f_to_float32(a), f_to_float32(b),
                                    &FP_STATUS));
}
512

    
513
/* DIVF: VAX F-floating divide via softfloat, honouring FP_STATUS. */
uint64_t helper_divf (uint64_t a, uint64_t b)
{
    return float32_to_f(float32_div(f_to_float32(a), f_to_float32(b),
                                    &FP_STATUS));
}
522

    
523
/* SQRTF: VAX F-floating square root via softfloat. */
uint64_t helper_sqrtf (uint64_t t)
{
    return float32_to_f(float32_sqrt(f_to_float32(t), &FP_STATUS));
}
531

    
532

    
533
/* G floating (VAX) */
534
/* Convert an IEEE double-precision value to the VAX G-floating register
   representation.  NaN/infinity and overflow map to a VAX "dirty zero". */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;     /* sign bit */
    exp = (a.ll >> 52) & 0x7ff;             /* 11-bit IEEE exponent */
    mant = a.ll & 0x000fffffffffffffull;    /* 52-bit fraction */

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number: re-bias the exponent AND keep the fraction.
               The old code omitted "| mant", collapsing every normal
               value to a power of two. */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
566

    
567
/* Convert a VAX G-floating register value back to an IEEE double bit
   pattern.  Raises OPCDEC on reserved operands / dirty zeros; exponents
   too small to re-bias underflow to zero. */
static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        /* Undo the +2 bias applied by float64_to_g. */
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}
589

    
590
/* G-floating register-to-memory: swap the four 16-bit words into VAX
   memory order. */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((a >> (i * 16)) & 0xffffull) << ((3 - i) * 16);
    }
    return r;
}
599

    
600
/* G-floating memory-to-register: the word swap is self-inverse, so this
   is the same permutation as helper_g_to_memory. */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((a >> (i * 16)) & 0xffffull) << ((3 - i) * 16);
    }
    return r;
}
609

    
610
/* ADDG: VAX G-floating add via softfloat, honouring FP_STATUS. */
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    return float64_to_g(float64_add(g_to_float64(a), g_to_float64(b),
                                    &FP_STATUS));
}
619

    
620
/* SUBG: VAX G-floating subtract via softfloat, honouring FP_STATUS. */
uint64_t helper_subg (uint64_t a, uint64_t b)
{
    return float64_to_g(float64_sub(g_to_float64(a), g_to_float64(b),
                                    &FP_STATUS));
}
629

    
630
/* MULG: VAX G-floating multiply via softfloat, honouring FP_STATUS. */
uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    return float64_to_g(float64_mul(g_to_float64(a), g_to_float64(b),
                                    &FP_STATUS));
}
639

    
640
/* DIVG: VAX G-floating divide via softfloat, honouring FP_STATUS. */
uint64_t helper_divg (uint64_t a, uint64_t b)
{
    return float64_to_g(float64_div(g_to_float64(a), g_to_float64(b),
                                    &FP_STATUS));
}
649

    
650
/* SQRTG: VAX G-floating square root via softfloat. */
uint64_t helper_sqrtg (uint64_t a)
{
    return float64_to_g(float64_sqrt(g_to_float64(a), &FP_STATUS));
}
658

    
659

    
660
/* S floating (single) */
661
/* Map an IEEE single into the 64-bit S-floating register layout: sign
   and exponent MSB to <63:62>, the rest shifted to <58:29>, with the
   8-bit exponent sign-extended into the 11-bit field for finite numbers
   with a clear exponent MSB. */
static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;

    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
        r |= 0x7ll << 59;   /* widen the biased exponent */
    return r;
}
673

    
674
/* Map an S-floating register value back to an IEEE single bit pattern
   (inverse of float32_to_s; the widened exponent bits are dropped). */
static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}
680

    
681
/* S-floating register-to-memory: the memory format is exactly the IEEE
   single bit pattern, so extract it directly from the register layout.
   This replaces the old "*(uint32_t *)&float32" pun, which violated
   strict aliasing whenever float32 is not a bare uint32_t. */
uint32_t helper_s_to_memory (uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}
687

    
688
/* S-floating memory-to-register: widen the IEEE single bit pattern into
   the register layout (same transform as float32_to_s), working directly
   on the bits.  This replaces the old "*(float32 *)&uint32_t" pun, which
   violated strict aliasing whenever float32 is not a bare uint32_t. */
uint64_t helper_memory_to_s (uint32_t a)
{
    uint64_t r;

    r = (((uint64_t)(a & 0xc0000000)) << 32) | (((uint64_t)(a & 0x3fffffff)) << 29);
    if (((a & 0x7f800000) != 0x7f800000) && (!(a & 0x40000000)))
        r |= 0x7ll << 59;   /* widen the biased exponent */
    return r;
}
693

    
694
/* ADDS: IEEE single add via softfloat, honouring FP_STATUS. */
uint64_t helper_adds (uint64_t a, uint64_t b)
{
    return float32_to_s(float32_add(s_to_float32(a), s_to_float32(b),
                                    &FP_STATUS));
}
703

    
704
/* SUBS: IEEE single subtract via softfloat, honouring FP_STATUS. */
uint64_t helper_subs (uint64_t a, uint64_t b)
{
    return float32_to_s(float32_sub(s_to_float32(a), s_to_float32(b),
                                    &FP_STATUS));
}
713

    
714
/* MULS: IEEE single multiply via softfloat, honouring FP_STATUS. */
uint64_t helper_muls (uint64_t a, uint64_t b)
{
    return float32_to_s(float32_mul(s_to_float32(a), s_to_float32(b),
                                    &FP_STATUS));
}
723

    
724
/* DIVS: IEEE single divide via softfloat, honouring FP_STATUS. */
uint64_t helper_divs (uint64_t a, uint64_t b)
{
    return float32_to_s(float32_div(s_to_float32(a), s_to_float32(b),
                                    &FP_STATUS));
}
733

    
734
/* SQRTS: IEEE single square root via softfloat. */
uint64_t helper_sqrts (uint64_t a)
{
    return float32_to_s(float32_sqrt(s_to_float32(a), &FP_STATUS));
}
742

    
743

    
744
/* T floating (double) */
745
/* T-floating register value to host float64: the register format is
   exactly the IEEE double bit pattern, so this is a pure reinterpret
   through the CPU_DoubleU union. */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}
752

    
753
/* Host float64 to T-floating register value: pure bit reinterpret
   through the CPU_DoubleU union (inverse of t_to_float64). */
static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}
760

    
761
/* ADDT: IEEE double add via softfloat, honouring FP_STATUS. */
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    return float64_to_t(float64_add(t_to_float64(a), t_to_float64(b),
                                    &FP_STATUS));
}
770

    
771
/* SUBT: IEEE double subtract via softfloat, honouring FP_STATUS. */
uint64_t helper_subt (uint64_t a, uint64_t b)
{
    return float64_to_t(float64_sub(t_to_float64(a), t_to_float64(b),
                                    &FP_STATUS));
}
780

    
781
/* MULT: IEEE double multiply via softfloat, honouring FP_STATUS. */
uint64_t helper_mult (uint64_t a, uint64_t b)
{
    return float64_to_t(float64_mul(t_to_float64(a), t_to_float64(b),
                                    &FP_STATUS));
}
790

    
791
/* DIVT: IEEE double divide via softfloat, honouring FP_STATUS. */
uint64_t helper_divt (uint64_t a, uint64_t b)
{
    return float64_to_t(float64_div(t_to_float64(a), t_to_float64(b),
                                    &FP_STATUS));
}
800

    
801
/* SQRTT: IEEE double square root via softfloat. */
uint64_t helper_sqrtt (uint64_t a)
{
    return float64_to_t(float64_sqrt(t_to_float64(a), &FP_STATUS));
}
809

    
810

    
811
/* Sign copy */
812
/* CPYS: combine the sign bit of a with the magnitude bits of b. */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    const uint64_t sign = 1ULL << 63;

    return (a & sign) | (b & ~sign);
}
816

    
817
/* CPYSN: combine the negated sign bit of a with the magnitude of b. */
uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    const uint64_t sign = 1ULL << 63;

    return (~a & sign) | (b & ~sign);
}
821

    
822
/* CPYSE: combine the sign and exponent of a with the fraction of b. */
uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    const uint64_t sign_exp = 0xFFF0000000000000ULL;

    return (a & sign_exp) | (b & ~sign_exp);
}
826

    
827

    
828
/* Comparisons */
829
/* CMPTUN: T-floating "unordered" compare — true (2.0 in T format,
   0x4000000000000000) when either operand is a NaN. */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa = t_to_float64(a);
    float64 fb = t_to_float64(b);

    return (float64_is_nan(fa) || float64_is_nan(fb))
        ? 0x4000000000000000ULL : 0;
}
841

    
842
/* CMPTEQ: T-floating equality — 2.0 (true) when a == b, else +0. */
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    return float64_eq(t_to_float64(a), t_to_float64(b), &FP_STATUS)
        ? 0x4000000000000000ULL : 0;
}
854

    
855
/* CMPTLE: T-floating less-or-equal — 2.0 (true) when a <= b, else +0. */
uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    return float64_le(t_to_float64(a), t_to_float64(b), &FP_STATUS)
        ? 0x4000000000000000ULL : 0;
}
867

    
868
/* CMPTLT: T-floating less-than — 2.0 (true) when a < b, else +0. */
uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    return float64_lt(t_to_float64(a), t_to_float64(b), &FP_STATUS)
        ? 0x4000000000000000ULL : 0;
}
880

    
881
/* CMPGEQ: G-floating equality — 2.0 (true) when a == b, else +0. */
uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    return float64_eq(g_to_float64(a), g_to_float64(b), &FP_STATUS)
        ? 0x4000000000000000ULL : 0;
}
893

    
894
/* CMPGLE: G-floating less-or-equal — 2.0 (true) when a <= b, else +0. */
uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    return float64_le(g_to_float64(a), g_to_float64(b), &FP_STATUS)
        ? 0x4000000000000000ULL : 0;
}
906

    
907
/* CMPGLT: G-floating less-than — 2.0 (true) when a < b, else +0. */
uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    return float64_lt(g_to_float64(a), g_to_float64(b), &FP_STATUS)
        ? 0x4000000000000000ULL : 0;
}
919

    
920
/* True (1) iff a is +0 or -0, i.e. all magnitude bits clear. */
uint64_t helper_cmpfeq (uint64_t a)
{
    return (a << 1) == 0;
}
924

    
925
/* Non-zero iff a is not +/-0; returns the magnitude bits themselves. */
uint64_t helper_cmpfne (uint64_t a)
{
    return a & ~(1ULL << 63);
}
929

    
930
/* True (1) iff a is strictly negative: sign set and magnitude non-zero. */
uint64_t helper_cmpflt (uint64_t a)
{
    return (a >> 63) && (a << 1) != 0;
}
934

    
935
/* True (1) iff a is negative or zero: sign set, or magnitude zero. */
uint64_t helper_cmpfle (uint64_t a)
{
    return (a >> 63) || (a << 1) == 0;
}
939

    
940
/* True (1) iff a is strictly positive: sign clear and magnitude non-zero. */
uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a >> 63) && (a << 1) != 0;
}
944

    
945
/* True (1) iff a is positive or zero: sign clear, or magnitude zero. */
uint64_t helper_cmpfge (uint64_t a)
{
    return !(a >> 63) || (a << 1) == 0;
}
949

    
950

    
951
/* Floating point format conversion */
952
/* CVTTS: convert T-floating (double) to S-floating (single). */
uint64_t helper_cvtts (uint64_t a)
{
    return float32_to_s(float64_to_float32(t_to_float64(a), &FP_STATUS));
}
961

    
962
/* CVTST: convert S-floating (single) to T-floating (double). */
uint64_t helper_cvtst (uint64_t a)
{
    return float64_to_t(float32_to_float64(s_to_float32(a), &FP_STATUS));
}
971

    
972
/* CVTQS: convert a signed quadword to S-floating. */
uint64_t helper_cvtqs (uint64_t a)
{
    return float32_to_s(int64_to_float32(a, &FP_STATUS));
}
977

    
978
/* CVTTQ: convert T-floating to a signed quadword, truncating toward 0. */
uint64_t helper_cvttq (uint64_t a)
{
    return float64_to_int64_round_to_zero(t_to_float64(a), &FP_STATUS);
}
983

    
984
/* CVTQT: convert a signed quadword to T-floating. */
uint64_t helper_cvtqt (uint64_t a)
{
    return float64_to_t(int64_to_float64(a, &FP_STATUS));
}
989

    
990
/* CVTQF: convert a signed quadword to VAX F-floating. */
uint64_t helper_cvtqf (uint64_t a)
{
    return float32_to_f(int64_to_float32(a, &FP_STATUS));
}
995

    
996
/* CVTGF: convert VAX G-floating to VAX F-floating. */
uint64_t helper_cvtgf (uint64_t a)
{
    return float32_to_f(float64_to_float32(g_to_float64(a), &FP_STATUS));
}
1005

    
1006
/* CVTGQ: convert VAX G-floating to a signed quadword, truncating. */
uint64_t helper_cvtgq (uint64_t a)
{
    return float64_to_int64_round_to_zero(g_to_float64(a), &FP_STATUS);
}
1011

    
1012
/* CVTQG: convert a signed quadword to VAX G-floating. */
uint64_t helper_cvtqg (uint64_t a)
{
    return float64_to_g(int64_to_float64(a, &FP_STATUS));
}
1018

    
1019
/* CVTLQ: extract a longword stored in FP-register format (bits <31:30>
   of the longword live at <63:62>, bits <29:0> at <58:29>) and
   sign-extend it to 64 bits.  The old code ORed in the UNMASKED
   (a >> 32), so stray register bits <61:32> corrupted the low 30 bits
   of the result. */
uint64_t helper_cvtlq (uint64_t a)
{
    uint32_t hi = (a >> 32) & 0xC0000000;
    uint32_t lo = (a >> 29) & 0x3FFFFFFF;

    return (int64_t)(int32_t)(hi | lo);
}
1023

    
1024
/* Common body for the CVTQL variants: pack the low 32 bits of 'a' into
   FP-register longword format (<31:30> -> <63:62>, <29:0> -> <58:29>).
   'v' requests an integer-overflow trap; 's' (software completion) is
   still unimplemented.
   NOTE(review): the overflow test compares the PACKED value r against
   its own low 32 bits rather than checking whether 'a' fits in 32
   signed bits — e.g. a == -1 packs to a value that fails the test and
   traps.  Presumably the check should be on 'a'; confirm against the
   architecture manual before changing. */
static inline uint64_t __helper_cvtql(uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}
1039

    
1040
/* CVTQL: quadword to longword, no overflow trap, no software completion. */
uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}
1044

    
1045
/* CVTQL/V: quadword to longword with integer-overflow trap. */
uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}
1049

    
1050
/* CVTQL/SV: quadword to longword with overflow trap and software
   completion (the latter is not implemented yet). */
uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}
1054

    
1055
/* PALcode support special instructions */
1056
#if !defined (CONFIG_USER_ONLY)
1057
/* HW_REI: return from PALcode via the saved exception address.  Bit 0
   of EXC_ADDR (the PAL-mode flag) is preserved in the IPR, the rest
   becomes the new PC. */
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}
1063

    
1064
/* HW_RET: return from PALcode to the address in a register.  Bit 0 of
   the target (PAL-mode flag) is stored into EXC_ADDR, the rest becomes
   the new PC. */
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}
1070

    
1071
/* MFPR: move from internal processor register 'iprn'.  'val' holds the
   current destination register value and is returned unchanged when
   cpu_alpha_mfpr reports failure (non-zero). */
uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}
1080

    
1081
/* MTPR: move 'val' to internal processor register 'iprn'; any error or
   old-value reporting from cpu_alpha_mtpr is discarded. */
void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}
1085

    
1086
/* Switch the current processor mode (PS<3:2>) to the alternate mode in
   IPR_ALT_MODE, saving the old mode for helper_restore_mode. */
void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}
1091

    
1092
/* Restore the processor mode saved by helper_set_alt_mode. */
void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}
1096

    
1097
#endif
1098

    
1099
/*****************************************************************************/
1100
/* Softmmu support */
1101
#if !defined (CONFIG_USER_ONLY)
1102

    
1103
/* XXX: the two following helpers are pure hacks.
1104
 *      Hopefully, we emulate the PALcode, then we should never see
1105
 *      HW_LD / HW_ST instructions.
1106
 */
1107
/* Translate a virtual address to a physical one through the softmmu TLB
   for a read access, filling the TLB on a miss (HW_LD hack; see the
   comment above).  Loops until the fill makes the entry match or
   tlb_fill longjmps out with an exception. */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB hit: apply the per-entry addend. */
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
1128

    
1129
/* Translate a virtual address to a physical one through the softmmu TLB
   for a write access, filling the TLB on a miss (HW_ST hack; companion
   of helper_ld_virt_to_phys). */
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB hit: apply the per-entry addend. */
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
1150

    
1151
/* Load a longword from physical address t1 into register slot t0. */
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}
1155

    
1156
/* Load a quadword from physical address t1 into register slot t0. */
void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}
1160

    
1161
/* Load-locked longword: record the locked address in env->lock for the
   matching store-conditional, then perform the load. */
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}
1166

    
1167
/* Load-locked quadword: record the locked address in env->lock for the
   matching store-conditional, then perform the load.  The old code
   called ldl_raw (32-bit) — a copy-paste bug that truncated the
   quadword to a longword. */
void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}
1172

    
1173
/* Load a longword from t1 using the kernel-mode MMU index. */
void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}
1177

    
1178
/* Load a quadword from t1 using the kernel-mode MMU index. */
void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}
1182

    
1183
/* Load a longword from t1 using the current data MMU index. */
void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}
1187

    
1188
/* Load a quadword from t1 using the current data MMU index. */
void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}
1192

    
1193
/* Store longword t0 to physical address t1. */
void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}
1197

    
1198
/* Store quadword t0 to physical address t1. */
void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}
1202

    
1203
/* Store-longword-conditional: the store succeeds (returns 0) only if
   t1 still matches the lock address set by a preceding load-locked;
   otherwise nothing is stored and 1 is returned.  The reservation is
   consumed either way by resetting env->lock to 1 (an address no
   load-locked can produce). */
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}
1217

    
1218
/* Store-quadword-conditional: same protocol as helper_stl_c_raw but
   for a 64-bit store — 0 on success, 1 when the lock was lost, and the
   reservation is always consumed. */
uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}
1232

    
1233
#define MMUSUFFIX _mmu
1234

    
1235
#define SHIFT 0
1236
#include "softmmu_template.h"
1237

    
1238
#define SHIFT 1
1239
#include "softmmu_template.h"
1240

    
1241
#define SHIFT 2
1242
#include "softmmu_template.h"
1243

    
1244
#define SHIFT 3
1245
#include "softmmu_template.h"
1246

    
1247
/* try to fill the TLB and return an exception if error. If retaddr is
1248
   NULL, it means that the function was called in C code (i.e. not
1249
   from generated code or from helper.c) */
1250
/* XXX: fix it to restore all registers */
1251
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set by the MMU
           fault handler; unwind straight to the main loop. */
        cpu_loop_exit();
    }
    env = saved_env;
}
1279

    
1280
#endif