
root / target-alpha / op_helper.c @ 4c9649a9

/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "exec.h"
#include "softfloat.h"

#include "op_helper.h"

#define MEMSUFFIX _raw
#include "op_helper_mem.h"

#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _user
#include "op_helper_mem.h"

#define MEMSUFFIX _kernel
#include "op_helper_mem.h"

/* Those are used for supervisor and executive modes */
#define MEMSUFFIX _data
#include "op_helper_mem.h"
#endif

void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}

void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (uint32_t excp, uint32_t error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

void helper_amask (void)
{
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        T0 &= ~env->amask;
        break;
    }
}
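/* Note: AMASK returns its operand with the bit for each implemented
 * architecture extension cleared.  The 2106x (EV4/EV45/LCA/EV5) family
 * implements none of the optional extensions, so the operand passes
 * through unchanged; later implementations clear the bits recorded in
 * env->amask.
 */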

void helper_load_pcc (void)
{
    /* XXX: TODO */
    T0 = 0;
}

void helper_load_implver (void)
{
    T0 = env->implver;
}

void helper_load_fpcr (void)
{
    T0 = 0;
#ifdef CONFIG_SOFTFLOAT
    T0 |= env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        T0 |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        T0 |= 2ULL << 58;
        break;
    case float_round_down:
        T0 |= 1ULL << 58;
        break;
    case float_round_up:
        T0 |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
}

void helper_store_fpcr (void)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((T0 >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}
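/* The FPCR helpers above map the dynamic rounding mode field in FPCR
 * bits <59:58> onto the softfloat rounding modes: 0 = chopped (toward
 * zero), 1 = minus infinity, 2 = normal (nearest even), 3 = plus
 * infinity.  helper_load_fpcr rebuilds the field with the same
 * encoding.
 */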

void helper_load_irf (void)
{
    /* XXX: TODO */
    T0 = 0;
}

void helper_set_irf (void)
{
    /* XXX: TODO */
}

void helper_clear_irf (void)
{
    /* XXX: TODO */
}

void helper_addqv (void)
{
    T2 = T0;
    T0 += T1;
    if (unlikely((T2 ^ T1 ^ (-1ULL)) & (T2 ^ T0) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
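/* Overflow test used above: a signed addition overflows only when both
 * operands have the same sign and the result has the opposite sign,
 * i.e. when (a ^ b ^ -1) & (a ^ result) has the sign bit set.  The
 * other *LV/*QV helpers below use the same identity, the subtract
 * variants with the complemented first operand.
 */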

void helper_addlv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 + T1);
    if (unlikely((T2 ^ T1 ^ (-1UL)) & (T2 ^ T0) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_subqv (void)
{
    T2 = T0;
    T0 -= T1;
    if (unlikely(((~T2) ^ T0 ^ (-1ULL)) & ((~T2) ^ T1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_sublv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 - T1);
    if (unlikely(((~T2) ^ T0 ^ (-1UL)) & ((~T2) ^ T1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_mullv (void)
{
    int64_t res = (int64_t)T0 * (int64_t)T1;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    T0 = (int64_t)((int32_t)res);
}

void helper_mulqv (void)
{
    uint64_t res, tmp0, tmp1;

    res = (T0 >> 32) * (T1 >> 32);
    tmp0 = ((T0 & 0xFFFFFFFF) * (T1 >> 32)) +
        ((T0 >> 32) * (T1 & 0xFFFFFFFF));
    tmp1 = (T0 & 0xFFFFFFFF) * (T1 & 0xFFFFFFFF);
    tmp0 += tmp1 >> 32;
    res += tmp0 >> 32;
    T0 *= T1;
    if (unlikely(res != 0)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_umulh (void)
{
    uint64_t tmp0, tmp1;

    tmp0 = ((T0 & 0xFFFFFFFF) * (T1 >> 32)) +
        ((T0 >> 32) * (T1 & 0xFFFFFFFF));
    tmp1 = (T0 & 0xFFFFFFFF) * (T1 & 0xFFFFFFFF);
    tmp0 += tmp1 >> 32;
    T0 = (T0 >> 32) * (T1 >> 32);
    T0 += tmp0 >> 32;
}
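/* UMULH returns the high 64 bits of the 128-bit unsigned product.
 * With A = Ah*2^32 + Al and B = Bh*2^32 + Bl, the code above computes
 * Ah*Bh and adds the carries that propagate out of the cross terms
 * Ah*Bl + Al*Bh and out of the low product Al*Bl.
 */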

void helper_ctpop (void)
{
    int n;

    for (n = 0; T0 != 0; n++)
        T0 &= T0 - 1;
    T0 = n;
}
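/* The loop above clears the lowest set bit of T0 on every iteration
 * (the classic n &= n - 1 idiom), so it executes once per set bit and
 * leaves the population count in n.
 */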

void helper_ctlz (void)
{
    uint32_t op32;
    int n;

    n = 0;
    if (!(T0 & 0xFFFFFFFF00000000ULL)) {
        n += 32;
        T0 <<= 32;
    }
    /* Make it easier for 32 bits hosts */
    op32 = T0 >> 32;
    if (!(op32 & 0xFFFF0000UL)) {
        n += 16;
        op32 <<= 16;
    }
    if (!(op32 & 0xFF000000UL)) {
        n += 8;
        op32 <<= 8;
    }
    if (!(op32 & 0xF0000000UL)) {
        n += 4;
        op32 <<= 4;
    }
    if (!(op32 & 0xC0000000UL)) {
        n += 2;
        op32 <<= 2;
    }
    if (!(op32 & 0x80000000UL)) {
        n++;
        op32 <<= 1;
    }
    if (!(op32 & 0x80000000UL)) {
        n++;
    }
    T0 = n;
}

void helper_cttz (void)
{
    uint32_t op32;
    int n;

    n = 0;
    if (!(T0 & 0x00000000FFFFFFFFULL)) {
        n += 32;
        T0 >>= 32;
    }
    /* Make it easier for 32 bits hosts */
    op32 = T0;
    if (!(op32 & 0x0000FFFFUL)) {
        n += 16;
        op32 >>= 16;
    }
    if (!(op32 & 0x000000FFUL)) {
        n += 8;
        op32 >>= 8;
    }
    if (!(op32 & 0x0000000FUL)) {
        n += 4;
        op32 >>= 4;
    }
    if (!(op32 & 0x00000003UL)) {
        n += 2;
        op32 >>= 2;
    }
    if (!(op32 & 0x00000001UL)) {
        n++;
        op32 >>= 1;
    }
    if (!(op32 & 0x00000001UL)) {
        n++;
    }
    T0 = n;
}

static inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
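/* byte_zap clears every byte of op whose bit is set in mskb, e.g.
 * byte_zap(x, 0x01) clears the low byte while byte_zap(x, 0xFE) keeps
 * only the low byte.  All of the MSK/INS/EXT/ZAP helpers below are
 * built on top of it.
 */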

void helper_mskbl (void)
{
    T0 = byte_zap(T0, 0x01 << (T1 & 7));
}

void helper_extbl (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFE);
}

void helper_insbl (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x01 << (T1 & 7)));
}

void helper_mskwl (void)
{
    T0 = byte_zap(T0, 0x03 << (T1 & 7));
}

void helper_extwl (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFC);
}

void helper_inswl (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x03 << (T1 & 7)));
}

void helper_mskll (void)
{
    T0 = byte_zap(T0, 0x0F << (T1 & 7));
}

void helper_extll (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xF0);
}

void helper_insll (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x0F << (T1 & 7)));
}

void helper_zap (void)
{
    T0 = byte_zap(T0, T1);
}

void helper_zapnot (void)
{
    T0 = byte_zap(T0, ~T1);
}

void helper_mskql (void)
{
    T0 = byte_zap(T0, 0xFF << (T1 & 7));
}

void helper_extql (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0x00);
}

void helper_insql (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0xFF << (T1 & 7)));
}

void helper_mskwh (void)
{
    T0 = byte_zap(T0, (0x03 << (T1 & 7)) >> 8);
}

void helper_inswh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0x03 << (T1 & 7)) >> 8));
}

void helper_extwh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~0x07);
}

void helper_msklh (void)
{
    T0 = byte_zap(T0, (0x0F << (T1 & 7)) >> 8);
}

void helper_inslh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0x0F << (T1 & 7)) >> 8));
}

void helper_extlh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~0x0F);
}

void helper_mskqh (void)
{
    T0 = byte_zap(T0, (0xFF << (T1 & 7)) >> 8);
}

void helper_insqh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0xFF << (T1 & 7)) >> 8));
}

void helper_extqh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, 0x00);
}

void helper_cmpbge (void)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = T0 >> (i * 8);
        opb = T1 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    T0 = res;
}
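/* CMPBGE compares the eight bytes of T0 and T1 as unsigned values in
 * parallel; bit i of the result is set when byte i of T0 is greater
 * than or equal to byte i of T1.
 */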

void helper_cmov_fir (int freg)
{
    if (FT0 != 0)
        env->fir[freg] = FT1;
}

void helper_sqrts (void)
{
    FT0 = float32_sqrt(FT0, &FP_STATUS);
}

void helper_cpys (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpysn (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = (~p.i) & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpyse (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0xFFF0000000000000ULL;
    r.i |= q.i & ~0xFFF0000000000000ULL;
    FT0 = r.d;
}
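/* The copy-sign helpers assemble the result from both inputs: CPYS
 * takes the sign bit of FT0 and the exponent/fraction of FT1, CPYSN
 * does the same with the sign inverted, and CPYSE copies the sign and
 * the 11-bit exponent field of FT0 onto the fraction of FT1.
 */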

void helper_itofs (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float32(p.i, &FP_STATUS);
}

void helper_ftois (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float32_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}

void helper_sqrtt (void)
{
    FT0 = float64_sqrt(FT0, &FP_STATUS);
}

void helper_cmptun (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_is_nan(FT0) || float64_is_nan(FT1))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpteq (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_eq(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptle (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_le(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptlt (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_lt(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_itoft (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float64(p.i, &FP_STATUS);
}

void helper_ftoit (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float64_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}

static int vaxf_is_valid (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}

static float vaxf_to_ieee32 (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}

static float ieee32_to_vaxf (float fi)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = fi;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 255) {
        /* NaN or infinity */
        p.i = 1;
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            p.i = 1;
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}
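/* The VAX F-float routines above piggyback on the host's IEEE single
 * format: the same bit pattern denotes a value four times larger when
 * read as IEEE single than as VAX F (the exponent bias and hidden-bit
 * conventions differ by two powers of two), so the conversions simply
 * rescale by 0.25 or 4.0 and special-case zeros, reserved operands,
 * NaNs and exponents that would over- or underflow.  The G-float
 * routines below do the same against IEEE double.
 */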

void helper_addf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_subf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_mulf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_divf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_sqrtf (void)
{
    float ft0, ft1;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = float32_sqrt(ft0, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft1);
}

void helper_itoff (void)
{
    /* XXX: TODO */
}

static int vaxg_is_valid (double ff)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t exp, mant;

    p.f = ff;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}

static double vaxg_to_ieee64 (double fg)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint32_t exp;

    p.f = fg;
    exp = (p.i >> 52) & 0x7FF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}

static double ieee64_to_vaxg (double fi)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t mant;
    uint32_t exp;

    p.f = fi;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 2047) {
        /* NaN or infinity */
        p.i = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            p.i = 1; /* VAX dirty zero */
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}

void helper_addg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_subg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_mulg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_divg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_sqrtg (void)
{
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = float64_sqrt(ft0, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft1);
}

void helper_cmpgeq (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_eq(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpglt (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_lt(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpgle (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_le(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cvtqs (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = (float)p.u;
}

void helper_cvttq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = FT0;
    FT0 = p.d;
}

void helper_cvtqt (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = p.u;
}

void helper_cvtqf (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee32_to_vaxf(p.u);
}

void helper_cvtgf (void)
{
    double ft0;

    ft0 = vaxg_to_ieee64(FT0);
    FT0 = ieee32_to_vaxf(ft0);
}

void helper_cvtgd (void)
{
    /* XXX: TODO */
}

void helper_cvtgq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = vaxg_to_ieee64(FT0);
    FT0 = p.d;
}

void helper_cvtqg (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee64_to_vaxg(p.u);
}

void helper_cvtdg (void)
{
    /* XXX: TODO */
}

void helper_cvtlq (void)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = (p.u >> 29) & 0x3FFFFFFF;
    q.u |= (p.u >> 32);
    q.u = (int64_t)((int32_t)q.u);
    FT0 = q.d;
}

static inline void __helper_cvtql (int s, int v)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
    FT0 = q.d;
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
}

void helper_cvtql (void)
{
    __helper_cvtql(0, 0);
}

void helper_cvtqlv (void)
{
    __helper_cvtql(0, 1);
}

void helper_cvtqlsv (void)
{
    __helper_cvtql(1, 1);
}

void helper_cmpfeq (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfne (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

void helper_cmpflt (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfle (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfgt (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

void helper_cmpfge (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

#if !defined (CONFIG_USER_ONLY)
void helper_mfpr (int iprn)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}

void helper_mtpr (int iprn)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define GETPC() (__builtin_return_address(0))

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, once we emulate the PALcode, we should never see
 *      HW_LD / HW_ST instructions.
 */
void helper_ld_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, is_user;
    void *retaddr;

    is_user = (env->ps >> 3) & 3;
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[is_user][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, is_user, retaddr);
        goto redo;
    }
    T0 = physaddr;
}

void helper_st_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, is_user;
    void *retaddr;

    is_user = (env->ps >> 3) & 3;
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[is_user][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, is_user, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
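/* The two helpers above walk QEMU's software TLB by hand: the virtual
 * page number selects a direct-mapped entry of env->tlb_table, its tag
 * is compared against the access address, and on a mismatch tlb_fill()
 * installs a mapping before the lookup is retried.  On a hit the
 * translated address is formed by adding the per-entry addend.
 */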

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    target_phys_addr_t pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (target_phys_addr_t)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif