Revision f18cd223 target-alpha/op_helper.c

b/target-alpha/op_helper.c
24 24

  
25 25
#include "op_helper.h"
26 26

  
27
#define MEMSUFFIX _raw
28
#include "op_helper_mem.h"
29

  
30
#if !defined(CONFIG_USER_ONLY)
31
#define MEMSUFFIX _kernel
32
#include "op_helper_mem.h"
33

  
34
#define MEMSUFFIX _executive
35
#include "op_helper_mem.h"
36

  
37
#define MEMSUFFIX _supervisor
38
#include "op_helper_mem.h"
39

  
40
#define MEMSUFFIX _user
41
#include "op_helper_mem.h"
42

  
43
/* This is used for pal modes */
44
#define MEMSUFFIX _data
45
#include "op_helper_mem.h"
46
#endif
47

  
48 27
void helper_tb_flush (void)
49 28
{
50 29
    tlb_flush(env, 1);
......
91 70
    return env->implver;
92 71
}
93 72

  
94
void helper_load_fpcr (void)
73
uint64_t helper_load_fpcr (void)
95 74
{
96
    T0 = 0;
75
    uint64_t ret = 0;
97 76
#ifdef CONFIG_SOFTFLOAT
98
    T0 |= env->fp_status.float_exception_flags << 52;
77
    ret |= (uint64_t)env->fp_status.float_exception_flags << 52;
99 78
    if (env->fp_status.float_exception_flags)
100
        T0 |= 1ULL << 63;
79
        ret |= 1ULL << 63;
101 80
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
102 81
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
103 82
#endif
104 83
    switch (env->fp_status.float_rounding_mode) {
105 84
    case float_round_nearest_even:
106
        T0 |= 2ULL << 58;
85
        ret |= 2ULL << 58;
107 86
        break;
108 87
    case float_round_down:
109
        T0 |= 1ULL << 58;
88
        ret |= 1ULL << 58;
110 89
        break;
111 90
    case float_round_up:
112
        T0 |= 3ULL << 58;
91
        ret |= 3ULL << 58;
113 92
        break;
114 93
    case float_round_to_zero:
115 94
        break;
116 95
    }
96
    return ret;
117 97
}
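
For reference, the dynamic rounding-mode field packed above sits in FPCR bits 59:58 (0 = chopped, 1 = minus infinity, 2 = nearest even, 3 = plus infinity). A minimal standalone sketch of the same decoding; the demo name is made up and this is not part of the patch:

#include <stdint.h>

/* Illustrative decode of FPCR<59:58>, mirroring the encoding used by
   helper_load_fpcr() above. */
static const char *demo_fpcr_round_mode(uint64_t fpcr)
{
    switch ((fpcr >> 58) & 3) {
    case 0:  return "chopped (toward zero)";
    case 1:  return "minus infinity";
    case 2:  return "nearest even";
    default: return "plus infinity";
    }
}
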
118 98

  
119
void helper_store_fpcr (void)
99
void helper_store_fpcr (uint64_t val)
120 100
{
121 101
#ifdef CONFIG_SOFTFLOAT
122
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
102
    set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
123 103
#endif
124
    switch ((T0 >> 58) & 3) {
104
    switch ((val >> 58) & 3) {
125 105
    case 0:
126 106
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
127 107
        break;
......
367 347
    return res;
368 348
}
369 349

  
370
void helper_cmov_fir (int freg)
350
/* Floating point helpers */
351

  
352
/* F floating (VAX) */
353
static always_inline uint64_t float32_to_f (float32 fa)
371 354
{
372
    if (FT0 != 0)
373
        env->fir[freg] = FT1;
355
    uint32_t a;
356
    uint64_t r, exp, mant, sig;
357

  
358
    a = *(uint32_t*)(&fa);
359
    sig = ((uint64_t)a & 0x80000000) << 32;
360
    exp = (a >> 23) & 0xff;
361
    mant = ((uint64_t)a & 0x007fffff) << 29;
362

  
363
    if (exp == 255) {
364
        /* NaN or infinity */
365
        r = 1; /* VAX dirty zero */
366
    } else if (exp == 0) {
367
        if (mant == 0) {
368
            /* Zero */
369
            r = 0;
370
        } else {
371
            /* Denormalized */
372
            r = sig | ((exp + 1) << 52) | mant;
373
        }
374
    } else {
375
        if (exp >= 253) {
376
            /* Overflow */
377
            r = 1; /* VAX dirty zero */
378
        } else {
379
            r = sig | ((exp + 2) << 52) | mant;
380
        }
381
    }
382

  
383
    return r;
374 384
}
375 385

  
376
void helper_sqrts (void)
386
static always_inline float32 f_to_float32 (uint64_t a)
377 387
{
378
    FT0 = float32_sqrt(FT0, &FP_STATUS);
388
    uint32_t r, exp, mant_sig;
389

  
390
    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
391
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
392

  
393
    if (unlikely(!exp && mant_sig)) {
394
        /* Reserved operands / Dirty zero */
395
        helper_excp(EXCP_OPCDEC, 0);
396
    }
397

  
398
    if (exp < 3) {
399
        /* Underflow */
400
        r = 0;
401
    } else {
402
        r = ((exp - 2) << 23) | mant_sig;
403
    }
404

  
405
    return *(float32*)(&r);
379 406
}
380 407

  
381
void helper_cpys (void)
408
uint32_t helper_f_to_memory (uint64_t a)
382 409
{
383
    union {
384
        double d;
385
        uint64_t i;
386
    } p, q, r;
410
    uint32_t r;
411
    r =  (a & 0x00001fffe0000000ull) >> 13;
412
    r |= (a & 0x07ffe00000000000ull) >> 45;
413
    r |= (a & 0xc000000000000000ull) >> 48;
414
    return r;
415
}
387 416

  
388
    p.d = FT0;
389
    q.d = FT1;
390
    r.i = p.i & 0x8000000000000000ULL;
391
    r.i |= q.i & ~0x8000000000000000ULL;
392
    FT0 = r.d;
417
uint64_t helper_memory_to_f (uint32_t a)
418
{
419
    uint64_t r;
420
    r =  ((uint64_t)(a & 0x0000c000)) << 48;
421
    r |= ((uint64_t)(a & 0x00003fff)) << 45;
422
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
423
    if (!(a & 0x00004000))
424
        r |= 0x7ll << 59;
425
    return r;
393 426
}
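
The two shuffles above implement the LDF/STF-style mapping between the 32-bit VAX F_floating memory image and the 64-bit register form. As a reference for the bit positions they assume, a small illustrative unpacker of the memory image; names are made up and this is not part of the patch:

#include <stdint.h>

/* VAX F_floating memory layout assumed by helper_memory_to_f(): bit 15 is
   the sign, bits 14:7 the excess-128 exponent, bits 6:0 the high fraction
   bits and bits 31:16 the low fraction bits. */
struct demo_vax_f {
    unsigned sign;
    unsigned exponent;
    uint32_t fraction;   /* 23 bits: <6:0> high, <31:16> low */
};

static struct demo_vax_f demo_unpack_f(uint32_t mem)
{
    struct demo_vax_f d;

    d.sign     = (mem >> 15) & 1;
    d.exponent = (mem >> 7) & 0xff;
    d.fraction = ((mem & 0x7f) << 16) | (mem >> 16);
    return d;
}
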
394 427

  
395
void helper_cpysn (void)
428
uint64_t helper_addf (uint64_t a, uint64_t b)
396 429
{
397
    union {
398
        double d;
399
        uint64_t i;
400
    } p, q, r;
430
    float32 fa, fb, fr;
401 431

  
402
    p.d = FT0;
403
    q.d = FT1;
404
    r.i = (~p.i) & 0x8000000000000000ULL;
405
    r.i |= q.i & ~0x8000000000000000ULL;
406
    FT0 = r.d;
432
    fa = f_to_float32(a);
433
    fb = f_to_float32(b);
434
    fr = float32_add(fa, fb, &FP_STATUS);
435
    return float32_to_f(fr);
407 436
}
408 437

  
409
void helper_cpyse (void)
438
uint64_t helper_subf (uint64_t a, uint64_t b)
410 439
{
411
    union {
412
        double d;
413
        uint64_t i;
414
    } p, q, r;
440
    float32 fa, fb, fr;
415 441

  
416
    p.d = FT0;
417
    q.d = FT1;
418
    r.i = p.i & 0xFFF0000000000000ULL;
419
    r.i |= q.i & ~0xFFF0000000000000ULL;
420
    FT0 = r.d;
442
    fa = f_to_float32(a);
443
    fb = f_to_float32(b);
444
    fr = float32_sub(fa, fb, &FP_STATUS);
445
    return float32_to_f(fr);
421 446
}
422 447

  
423
void helper_itofs (void)
448
uint64_t helper_mulf (uint64_t a, uint64_t b)
424 449
{
425
    union {
426
        double d;
427
        uint64_t i;
428
    } p;
450
    float32 fa, fb, fr;
429 451

  
430
    p.d = FT0;
431
    FT0 = int64_to_float32(p.i, &FP_STATUS);
452
    fa = f_to_float32(a);
453
    fb = f_to_float32(b);
454
    fr = float32_mul(fa, fb, &FP_STATUS);
455
    return float32_to_f(fr);
432 456
}
433 457

  
434
void helper_ftois (void)
458
uint64_t helper_divf (uint64_t a, uint64_t b)
435 459
{
436
    union {
437
        double d;
438
        uint64_t i;
439
    } p;
460
    float32 fa, fb, fr;
440 461

  
441
    p.i = float32_to_int64(FT0, &FP_STATUS);
442
    FT0 = p.d;
462
    fa = f_to_float32(a);
463
    fb = f_to_float32(b);
464
    fr = float32_div(fa, fb, &FP_STATUS);
465
    return float32_to_f(fr);
443 466
}
444 467

  
445
void helper_sqrtt (void)
468
uint64_t helper_sqrtf (uint64_t t)
446 469
{
447
    FT0 = float64_sqrt(FT0, &FP_STATUS);
470
    float32 ft, fr;
471

  
472
    ft = f_to_float32(t);
473
    fr = float32_sqrt(ft, &FP_STATUS);
474
    return float32_to_f(fr);
448 475
}
449 476

  
450
void helper_cmptun (void)
477

  
478
/* G floating (VAX) */
479
static always_inline uint64_t float64_to_g (float64 fa)
451 480
{
452
    union {
453
        double d;
454
        uint64_t i;
455
    } p;
481
    uint64_t a, r, exp, mant, sig;
456 482

  
457
    p.i = 0;
458
    if (float64_is_nan(FT0) || float64_is_nan(FT1))
459
        p.i = 0x4000000000000000ULL;
460
    FT0 = p.d;
483
    a = *(uint64_t*)(&fa);
484
    sig = a & 0x8000000000000000ull;
485
    exp = (a >> 52) & 0x7ff;
486
    mant = a & 0x000fffffffffffffull;
487

  
488
    if (exp == 2047) {
489
        /* NaN or infinity */
490
        r = 1; /* VAX dirty zero */
491
    } else if (exp == 0) {
492
        if (mant == 0) {
493
            /* Zero */
494
            r = 0;
495
        } else {
496
            /* Denormalized */
497
            r = sig | ((exp + 1) << 52) | mant;
498
        }
499
    } else {
500
        if (exp >= 2045) {
501
            /* Overflow */
502
            r = 1; /* VAX dirty zero */
503
        } else {
504
            r = sig | ((exp + 2) << 52) | mant;
505
        }
506
    }
507

  
508
    return r;
461 509
}
462 510

  
463
void helper_cmpteq (void)
511
static always_inline float64 g_to_float64 (uint64_t a)
464 512
{
465
    union {
466
        double d;
467
        uint64_t i;
468
    } p;
513
    uint64_t r, exp, mant_sig;
514

  
515
    exp = (a >> 52) & 0x7ff;
516
    mant_sig = a & 0x800fffffffffffffull;
517

  
518
    if (!exp && mant_sig) {
519
        /* Reserved operands / Dirty zero */
520
        helper_excp(EXCP_OPCDEC, 0);
521
    }
469 522

  
470
    p.i = 0;
471
    if (float64_eq(FT0, FT1, &FP_STATUS))
472
        p.i = 0x4000000000000000ULL;
473
    FT0 = p.d;
523
    if (exp < 3) {
524
        /* Underflow */
525
        r = 0;
526
    } else {
527
        r = ((exp - 2) << 52) | mant_sig;
528
    }
529

  
530
    return *(float64*)(&r);
474 531
}
475 532

  
476
void helper_cmptle (void)
533
uint64_t helper_g_to_memory (uint64_t a)
477 534
{
478
    union {
479
        double d;
480
        uint64_t i;
481
    } p;
535
    uint64_t r;
536
    r =  (a & 0x000000000000ffffull) << 48;
537
    r |= (a & 0x00000000ffff0000ull) << 16;
538
    r |= (a & 0x0000ffff00000000ull) >> 16;
539
    r |= (a & 0xffff000000000000ull) >> 48;
540
    return r;
541
}
482 542

  
483
    p.i = 0;
484
    if (float64_le(FT0, FT1, &FP_STATUS))
485
        p.i = 0x4000000000000000ULL;
486
    FT0 = p.d;
543
uint64_t helper_memory_to_g (uint64_t a)
544
{
545
    uint64_t r;
546
    r =  (a & 0x000000000000ffffull) << 48;
547
    r |= (a & 0x00000000ffff0000ull) << 16;
548
    r |= (a & 0x0000ffff00000000ull) >> 16;
549
    r |= (a & 0xffff000000000000ull) >> 48;
550
    return r;
487 551
}
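
helper_g_to_memory() and helper_memory_to_g() above are the same operation: a swap of the four 16-bit words of the datum, so applying the conversion twice returns the original value. A hypothetical self-test, not part of the patch:

#include <assert.h>
#include <stdint.h>

/* The G_floating load/store conversion is an involution: swapping the
   16-bit words twice gives back the input. */
static uint64_t demo_swap_words(uint64_t a)
{
    return ((a & 0x000000000000ffffull) << 48) |
           ((a & 0x00000000ffff0000ull) << 16) |
           ((a & 0x0000ffff00000000ull) >> 16) |
           ((a & 0xffff000000000000ull) >> 48);
}

static void demo_check_g_roundtrip(uint64_t x)
{
    assert(demo_swap_words(demo_swap_words(x)) == x);
}
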
488 552

  
489
void helper_cmptlt (void)
553
uint64_t helper_addg (uint64_t a, uint64_t b)
490 554
{
491
    union {
492
        double d;
493
        uint64_t i;
494
    } p;
555
    float64 fa, fb, fr;
495 556

  
496
    p.i = 0;
497
    if (float64_lt(FT0, FT1, &FP_STATUS))
498
        p.i = 0x4000000000000000ULL;
499
    FT0 = p.d;
557
    fa = g_to_float64(a);
558
    fb = g_to_float64(b);
559
    fr = float64_add(fa, fb, &FP_STATUS);
560
    return float64_to_g(fr);
500 561
}
501 562

  
502
void helper_itoft (void)
563
uint64_t helper_subg (uint64_t a, uint64_t b)
503 564
{
504
    union {
505
        double d;
506
        uint64_t i;
507
    } p;
565
    float64 fa, fb, fr;
508 566

  
509
    p.d = FT0;
510
    FT0 = int64_to_float64(p.i, &FP_STATUS);
567
    fa = g_to_float64(a);
568
    fb = g_to_float64(b);
569
    fr = float64_sub(fa, fb, &FP_STATUS);
570
    return float64_to_g(fr);
511 571
}
512 572

  
513
void helper_ftoit (void)
573
uint64_t helper_mulg (uint64_t a, uint64_t b)
514 574
{
515
    union {
516
        double d;
517
        uint64_t i;
518
    } p;
575
    float64 fa, fb, fr;
519 576

  
520
    p.i = float64_to_int64(FT0, &FP_STATUS);
521
    FT0 = p.d;
577
    fa = g_to_float64(a);
578
    fb = g_to_float64(b);
579
    fr = float64_mul(fa, fb, &FP_STATUS);
580
    return float64_to_g(fr);
522 581
}
523 582

  
524
static always_inline int vaxf_is_valid (float ff)
583
uint64_t helper_divg (uint64_t a, uint64_t b)
525 584
{
526
    union {
527
        float f;
528
        uint32_t i;
529
    } p;
530
    uint32_t exp, mant;
585
    float64 fa, fb, fr;
531 586

  
532
    p.f = ff;
533
    exp = (p.i >> 23) & 0xFF;
534
    mant = p.i & 0x007FFFFF;
535
    if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
536
        /* Reserved operands / Dirty zero */
537
        return 0;
538
    }
587
    fa = g_to_float64(a);
588
    fb = g_to_float64(b);
589
    fr = float64_div(fa, fb, &FP_STATUS);
590
    return float64_to_g(fr);
591
}
592

  
593
uint64_t helper_sqrtg (uint64_t a)
594
{
595
    float64 fa, fr;
539 596

  
540
    return 1;
597
    fa = g_to_float64(a);
598
    fr = float64_sqrt(fa, &FP_STATUS);
599
    return float64_to_g(fr);
541 600
}
542 601

  
543
static always_inline float vaxf_to_ieee32 (float ff)
602

  
603
/* S floating (single) */
604
static always_inline uint64_t float32_to_s (float32 fa)
544 605
{
545
    union {
546
        float f;
547
        uint32_t i;
548
    } p;
549
    uint32_t exp;
606
    uint32_t a;
607
    uint64_t r;
550 608

  
551
    p.f = ff;
552
    exp = (p.i >> 23) & 0xFF;
553
    if (exp < 3) {
554
        /* Underflow */
555
        p.f = 0.0;
556
    } else {
557
        p.f *= 0.25;
558
    }
609
    a = *(uint32_t*)(&fa);
559 610

  
560
    return p.f;
611
    r = (((uint64_t)(a & 0xc0000000)) << 32) | (((uint64_t)(a & 0x3fffffff)) << 29);
612
    if (((a & 0x7f800000) != 0x7f800000) && (!(a & 0x40000000)))
613
        r |= 0x7ll << 59;
614
    return r;
561 615
}
562 616

  
563
static always_inline float ieee32_to_vaxf (float fi)
617
static always_inline float32 s_to_float32 (uint64_t a)
564 618
{
565
    union {
566
        float f;
567
        uint32_t i;
568
    } p;
569
    uint32_t exp, mant;
619
    uint32_t r = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
620
    return *(float32*)(&r);
621
}
570 622

  
571
    p.f = fi;
572
    exp = (p.i >> 23) & 0xFF;
573
    mant = p.i & 0x007FFFFF;
574
    if (exp == 255) {
575
        /* NaN or infinity */
576
        p.i = 1;
577
    } else if (exp == 0) {
578
        if (mant == 0) {
579
            /* Zero */
580
            p.i = 0;
581
        } else {
582
            /* Denormalized */
583
            p.f *= 2.0;
584
        }
585
    } else {
586
        if (exp >= 253) {
587
            /* Overflow */
588
            p.i = 1;
589
        } else {
590
            p.f *= 4.0;
591
        }
592
    }
623
uint32_t helper_s_to_memory (uint64_t a)
624
{
625
    /* Memory format is the same as float32 */
626
    float32 fa = s_to_float32(a);
627
    return *(uint32_t*)(&fa);
628
}
593 629

  
594
    return p.f;
630
uint64_t helper_memory_to_s (uint32_t a)
631
{
632
    /* Memory format is the same as float32 */
633
    return float32_to_s(*(float32*)(&a));
595 634
}
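
float32_to_s() and s_to_float32() spread the 32-bit IEEE single over the 64-bit register image (bits 31:30 into 63:62, bits 29:0 into 58:29, widened exponent in 61:59), so the round trip through both is exact. A hypothetical self-test using the same shifts, not part of the patch:

#include <assert.h>
#include <stdint.h>

static void demo_s_roundtrip(uint32_t ieee)
{
    uint64_t reg;
    uint32_t back;

    /* register image, as in float32_to_s() */
    reg = (((uint64_t)(ieee & 0xc0000000)) << 32) |
          (((uint64_t)(ieee & 0x3fffffff)) << 29);
    if (((ieee & 0x7f800000) != 0x7f800000) && !(ieee & 0x40000000))
        reg |= 0x7ll << 59;

    /* back to the 32-bit image, as in s_to_float32() */
    back = ((reg >> 32) & 0xc0000000) | ((reg >> 29) & 0x3fffffff);
    assert(back == ieee);
}
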
596 635

  
597
void helper_addf (void)
636
uint64_t helper_adds (uint64_t a, uint64_t b)
598 637
{
599
    float ft0, ft1, ft2;
638
    float32 fa, fb, fr;
600 639

  
601
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
602
        /* XXX: TODO */
603
    }
604
    ft0 = vaxf_to_ieee32(FT0);
605
    ft1 = vaxf_to_ieee32(FT1);
606
    ft2 = float32_add(ft0, ft1, &FP_STATUS);
607
    FT0 = ieee32_to_vaxf(ft2);
640
    fa = s_to_float32(a);
641
    fb = s_to_float32(b);
642
    fr = float32_add(fa, fb, &FP_STATUS);
643
    return float32_to_s(fr);
608 644
}
609 645

  
610
void helper_subf (void)
646
uint64_t helper_subs (uint64_t a, uint64_t b)
611 647
{
612
    float ft0, ft1, ft2;
648
    float32 fa, fb, fr;
613 649

  
614
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
615
        /* XXX: TODO */
616
    }
617
    ft0 = vaxf_to_ieee32(FT0);
618
    ft1 = vaxf_to_ieee32(FT1);
619
    ft2 = float32_sub(ft0, ft1, &FP_STATUS);
620
    FT0 = ieee32_to_vaxf(ft2);
650
    fa = s_to_float32(a);
651
    fb = s_to_float32(b);
652
    fr = float32_sub(fa, fb, &FP_STATUS);
653
    return float32_to_s(fr);
621 654
}
622 655

  
623
void helper_mulf (void)
656
uint64_t helper_muls (uint64_t a, uint64_t b)
624 657
{
625
    float ft0, ft1, ft2;
658
    float32 fa, fb, fr;
626 659

  
627
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
628
        /* XXX: TODO */
629
    }
630
    ft0 = vaxf_to_ieee32(FT0);
631
    ft1 = vaxf_to_ieee32(FT1);
632
    ft2 = float32_mul(ft0, ft1, &FP_STATUS);
633
    FT0 = ieee32_to_vaxf(ft2);
660
    fa = s_to_float32(a);
661
    fb = s_to_float32(b);
662
    fr = float32_mul(fa, fb, &FP_STATUS);
663
    return float32_to_s(fr);
634 664
}
635 665

  
636
void helper_divf (void)
666
uint64_t helper_divs (uint64_t a, uint64_t b)
637 667
{
638
    float ft0, ft1, ft2;
668
    float32 fa, fb, fr;
639 669

  
640
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
641
        /* XXX: TODO */
642
    }
643
    ft0 = vaxf_to_ieee32(FT0);
644
    ft1 = vaxf_to_ieee32(FT1);
645
    ft2 = float32_div(ft0, ft1, &FP_STATUS);
646
    FT0 = ieee32_to_vaxf(ft2);
670
    fa = s_to_float32(a);
671
    fb = s_to_float32(b);
672
    fr = float32_div(fa, fb, &FP_STATUS);
673
    return float32_to_s(fr);
647 674
}
648 675

  
649
void helper_sqrtf (void)
676
uint64_t helper_sqrts (uint64_t a)
650 677
{
651
    float ft0, ft1;
678
    float32 fa, fr;
652 679

  
653
    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
654
        /* XXX: TODO */
655
    }
656
    ft0 = vaxf_to_ieee32(FT0);
657
    ft1 = float32_sqrt(ft0, &FP_STATUS);
658
    FT0 = ieee32_to_vaxf(ft1);
680
    fa = s_to_float32(a);
681
    fr = float32_sqrt(fa, &FP_STATUS);
682
    return float32_to_s(fr);
659 683
}
660 684

  
661
void helper_itoff (void)
685

  
686
/* T floating (double) */
687
static always_inline float64 t_to_float64 (uint64_t a)
662 688
{
663
    /* XXX: TODO */
689
    /* Memory format is the same as float64 */
690
    return *(float64*)(&a);
664 691
}
665 692

  
666
static always_inline int vaxg_is_valid (double ff)
693
static always_inline uint64_t float64_to_t (float64 fa)
667 694
{
668
    union {
669
        double f;
670
        uint64_t i;
671
    } p;
672
    uint64_t exp, mant;
695
    /* Memory format is the same as float64 */
696
    return *(uint64*)(&fa);
697
}
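
T_floating is plain IEEE double, so t_to_float64()/float64_to_t() are pure reinterpretations through pointer casts (acceptable here since QEMU builds with -fno-strict-aliasing). A strict-aliasing-clean equivalent, shown only for illustration with a hypothetical name:

#include <stdint.h>
#include <string.h>

static uint64_t demo_double_bits(double d)
{
    uint64_t bits;

    memcpy(&bits, &d, sizeof(bits));   /* same reinterpretation, no aliasing cast */
    return bits;
}
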
673 698

  
674
    p.f = ff;
675
    exp = (p.i >> 52) & 0x7FF;
676
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
677
    if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
678
        /* Reserved operands / Dirty zero */
679
        return 0;
680
    }
699
uint64_t helper_addt (uint64_t a, uint64_t b)
700
{
701
    float64 fa, fb, fr;
681 702

  
682
    return 1;
703
    fa = t_to_float64(a);
704
    fb = t_to_float64(b);
705
    fr = float64_add(fa, fb, &FP_STATUS);
706
    return float64_to_t(fr);
683 707
}
684 708

  
685
static always_inline double vaxg_to_ieee64 (double fg)
709
uint64_t helper_subt (uint64_t a, uint64_t b)
686 710
{
687
    union {
688
        double f;
689
        uint64_t i;
690
    } p;
691
    uint32_t exp;
711
    float64 fa, fb, fr;
692 712

  
693
    p.f = fg;
694
    exp = (p.i >> 52) & 0x7FF;
695
    if (exp < 3) {
696
        /* Underflow */
697
        p.f = 0.0;
698
    } else {
699
        p.f *= 0.25;
700
    }
701

  
702
    return p.f;
713
    fa = t_to_float64(a);
714
    fb = t_to_float64(b);
715
    fr = float64_sub(fa, fb, &FP_STATUS);
716
    return float64_to_t(fr);
703 717
}
704 718

  
705
static always_inline double ieee64_to_vaxg (double fi)
719
uint64_t helper_mult (uint64_t a, uint64_t b)
706 720
{
707
    union {
708
        double f;
709
        uint64_t i;
710
    } p;
711
    uint64_t mant;
712
    uint32_t exp;
713

  
714
    p.f = fi;
715
    exp = (p.i >> 52) & 0x7FF;
716
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
717
    if (exp == 255) {
718
        /* NaN or infinity */
719
        p.i = 1; /* VAX dirty zero */
720
    } else if (exp == 0) {
721
        if (mant == 0) {
722
            /* Zero */
723
            p.i = 0;
724
        } else {
725
            /* Denormalized */
726
            p.f *= 2.0;
727
        }
728
    } else {
729
        if (exp >= 2045) {
730
            /* Overflow */
731
            p.i = 1; /* VAX dirty zero */
732
        } else {
733
            p.f *= 4.0;
734
        }
735
    }
721
    float64 fa, fb, fr;
736 722

  
737
    return p.f;
723
    fa = t_to_float64(a);
724
    fb = t_to_float64(b);
725
    fr = float64_mul(fa, fb, &FP_STATUS);
726
    return float64_to_t(fr);
738 727
}
739 728

  
740
void helper_addg (void)
729
uint64_t helper_divt (uint64_t a, uint64_t b)
741 730
{
742
    double ft0, ft1, ft2;
731
    float64 fa, fb, fr;
743 732

  
744
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
745
        /* XXX: TODO */
746
    }
747
    ft0 = vaxg_to_ieee64(FT0);
748
    ft1 = vaxg_to_ieee64(FT1);
749
    ft2 = float64_add(ft0, ft1, &FP_STATUS);
750
    FT0 = ieee64_to_vaxg(ft2);
733
    fa = t_to_float64(a);
734
    fb = t_to_float64(b);
735
    fr = float64_div(fa, fb, &FP_STATUS);
736
    return float64_to_t(fr);
751 737
}
752 738

  
753
void helper_subg (void)
739
uint64_t helper_sqrtt (uint64_t a)
754 740
{
755
    double ft0, ft1, ft2;
741
    float64 fa, fr;
756 742

  
757
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
758
        /* XXX: TODO */
759
    }
760
    ft0 = vaxg_to_ieee64(FT0);
761
    ft1 = vaxg_to_ieee64(FT1);
762
    ft2 = float64_sub(ft0, ft1, &FP_STATUS);
763
    FT0 = ieee64_to_vaxg(ft2);
743
    fa = t_to_float64(a);
744
    fr = float64_sqrt(fa, &FP_STATUS);
745
    return float64_to_t(fr);
764 746
}
765 747

  
766
void helper_mulg (void)
767
{
768
    double ft0, ft1, ft2;
769 748

  
770
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
771
        /* XXX: TODO */
772
    }
773
    ft0 = vaxg_to_ieee64(FT0);
774
    ft1 = vaxg_to_ieee64(FT1);
775
    ft2 = float64_mul(ft0, ft1, &FP_STATUS);
776
    FT0 = ieee64_to_vaxg(ft2);
749
/* Sign copy */
750
uint64_t helper_cpys(uint64_t a, uint64_t b)
751
{
752
    return (a & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
777 753
}
778 754

  
779
void helper_divg (void)
755
uint64_t helper_cpysn(uint64_t a, uint64_t b)
780 756
{
781
    double ft0, ft1, ft2;
757
    return ((~a) & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
758
}
782 759

  
783
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
784
        /* XXX: TODO */
785
    }
786
    ft0 = vaxg_to_ieee64(FT0);
787
    ft1 = vaxg_to_ieee64(FT1);
788
    ft2 = float64_div(ft0, ft1, &FP_STATUS);
789
    FT0 = ieee64_to_vaxg(ft2);
760
uint64_t helper_cpyse(uint64_t a, uint64_t b)
761
{
762
    return (a & 0xFFF0000000000000ULL) | (b & ~0xFFF0000000000000ULL);
790 763
}
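
With sign copy reduced to pure bit operations above, the usual Alpha idioms follow directly: fabs is CPYS with a non-negative first operand (e.g. $f31) and fneg is CPYSN of a value onto itself. An illustrative sketch with made-up names, not part of the patch:

#include <stdint.h>

static uint64_t demo_fabs_bits(uint64_t x)
{
    /* CPYS $f31, x: take the sign of +0.0, everything else from x */
    return x & ~0x8000000000000000ULL;
}

static uint64_t demo_fneg_bits(uint64_t x)
{
    /* CPYSN x, x: complement the sign of x, keep the rest */
    return x ^ 0x8000000000000000ULL;
}
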
791 764

  
792
void helper_sqrtg (void)
765

  
766
/* Comparisons */
767
uint64_t helper_cmptun (uint64_t a, uint64_t b)
793 768
{
794
    double ft0, ft1;
769
    float64 fa, fb;
795 770

  
796
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
797
        /* XXX: TODO */
798
    }
799
    ft0 = vaxg_to_ieee64(FT0);
800
    ft1 = float64_sqrt(ft0, &FP_STATUS);
801
    FT0 = ieee64_to_vaxg(ft1);
771
    fa = t_to_float64(a);
772
    fb = t_to_float64(b);
773

  
774
    if (float64_is_nan(fa) || float64_is_nan(fb))
775
        return 0x4000000000000000ULL;
776
    else
777
        return 0;
802 778
}
803 779

  
804
void helper_cmpgeq (void)
780
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
805 781
{
806
    union {
807
        double d;
808
        uint64_t u;
809
    } p;
810
    double ft0, ft1;
782
    float64 fa, fb;
811 783

  
812
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
813
        /* XXX: TODO */
814
    }
815
    ft0 = vaxg_to_ieee64(FT0);
816
    ft1 = vaxg_to_ieee64(FT1);
817
    p.u = 0;
818
    if (float64_eq(ft0, ft1, &FP_STATUS))
819
        p.u = 0x4000000000000000ULL;
820
    FT0 = p.d;
784
    fa = t_to_float64(a);
785
    fb = t_to_float64(b);
786

  
787
    if (float64_eq(fa, fb, &FP_STATUS))
788
        return 0x4000000000000000ULL;
789
    else
790
        return 0;
821 791
}
822 792

  
823
void helper_cmpglt (void)
793
uint64_t helper_cmptle(uint64_t a, uint64_t b)
824 794
{
825
    union {
826
        double d;
827
        uint64_t u;
828
    } p;
829
    double ft0, ft1;
795
    float64 fa, fb;
830 796

  
831
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
832
        /* XXX: TODO */
833
    }
834
    ft0 = vaxg_to_ieee64(FT0);
835
    ft1 = vaxg_to_ieee64(FT1);
836
    p.u = 0;
837
    if (float64_lt(ft0, ft1, &FP_STATUS))
838
        p.u = 0x4000000000000000ULL;
839
    FT0 = p.d;
797
    fa = t_to_float64(a);
798
    fb = t_to_float64(b);
799

  
800
    if (float64_le(fa, fb, &FP_STATUS))
801
        return 0x4000000000000000ULL;
802
    else
803
        return 0;
840 804
}
841 805

  
842
void helper_cmpgle (void)
806
uint64_t helper_cmptlt(uint64_t a, uint64_t b)
843 807
{
844
    union {
845
        double d;
846
        uint64_t u;
847
    } p;
848
    double ft0, ft1;
808
    float64 fa, fb;
849 809

  
850
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
851
        /* XXX: TODO */
852
    }
853
    ft0 = vaxg_to_ieee64(FT0);
854
    ft1 = vaxg_to_ieee64(FT1);
855
    p.u = 0;
856
    if (float64_le(ft0, ft1, &FP_STATUS))
857
        p.u = 0x4000000000000000ULL;
858
    FT0 = p.d;
810
    fa = t_to_float64(a);
811
    fb = t_to_float64(b);
812

  
813
    if (float64_lt(fa, fb, &FP_STATUS))
814
        return 0x4000000000000000ULL;
815
    else
816
        return 0;
859 817
}
860 818

  
861
void helper_cvtqs (void)
819
uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
862 820
{
863
    union {
864
        double d;
865
        uint64_t u;
866
    } p;
821
    float64 fa, fb;
867 822

  
868
    p.d = FT0;
869
    FT0 = (float)p.u;
823
    fa = g_to_float64(a);
824
    fb = g_to_float64(b);
825

  
826
    if (float64_eq(fa, fb, &FP_STATUS))
827
        return 0x4000000000000000ULL;
828
    else
829
        return 0;
870 830
}
871 831

  
872
void helper_cvttq (void)
832
uint64_t helper_cmpgle(uint64_t a, uint64_t b)
873 833
{
874
    union {
875
        double d;
876
        uint64_t u;
877
    } p;
834
    float64 fa, fb;
835

  
836
    fa = g_to_float64(a);
837
    fb = g_to_float64(b);
878 838

  
879
    p.u = FT0;
880
    FT0 = p.d;
839
    if (float64_le(fa, fb, &FP_STATUS))
840
        return 0x4000000000000000ULL;
841
    else
842
        return 0;
881 843
}
882 844

  
883
void helper_cvtqt (void)
845
uint64_t helper_cmpglt(uint64_t a, uint64_t b)
884 846
{
885
    union {
886
        double d;
887
        uint64_t u;
888
    } p;
847
    float64 fa, fb;
848

  
849
    fa = g_to_float64(a);
850
    fb = g_to_float64(b);
889 851

  
890
    p.d = FT0;
891
    FT0 = p.u;
852
    if (float64_lt(fa, fb, &FP_STATUS))
853
        return 0x4000000000000000ULL;
854
    else
855
        return 0;
892 856
}
893 857

  
894
void helper_cvtqf (void)
858
uint64_t helper_cmpfeq (uint64_t a)
895 859
{
896
    union {
897
        double d;
898
        uint64_t u;
899
    } p;
900

  
901
    p.d = FT0;
902
    FT0 = ieee32_to_vaxf(p.u);
860
    return !(a & 0x7FFFFFFFFFFFFFFFULL);
903 861
}
904 862

  
905
void helper_cvtgf (void)
863
uint64_t helper_cmpfne (uint64_t a)
906 864
{
907
    double ft0;
865
    return (a & 0x7FFFFFFFFFFFFFFFULL);
866
}
908 867

  
909
    ft0 = vaxg_to_ieee64(FT0);
910
    FT0 = ieee32_to_vaxf(ft0);
868
uint64_t helper_cmpflt (uint64_t a)
869
{
870
    return (a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
911 871
}
912 872

  
913
void helper_cvtgd (void)
873
uint64_t helper_cmpfle (uint64_t a)
914 874
{
915
    /* XXX: TODO */
875
    return (a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
916 876
}
917 877

  
918
void helper_cvtgq (void)
878
uint64_t helper_cmpfgt (uint64_t a)
919 879
{
920
    union {
921
        double d;
922
        uint64_t u;
923
    } p;
880
    return !(a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
881
}
924 882

  
925
    p.u = vaxg_to_ieee64(FT0);
926
    FT0 = p.d;
883
uint64_t helper_cmpfge (uint64_t a)
884
{
885
    return !(a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
927 886
}
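
The cmpf* helpers above classify a VAX-format value against zero purely from its bit pattern (bit 63 sign, bits 62:0 magnitude); there is no NaN case to handle. A hypothetical three-way classifier built on the same tests:

#include <stdint.h>

static int demo_classify_vax(uint64_t a)
{
    if (!(a & 0x7FFFFFFFFFFFFFFFULL))
        return 0;                                /* zero, as in helper_cmpfeq */
    return (a & 0x8000000000000000ULL) ? -1 : 1; /* negative / positive */
}
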
928 887

  
929
void helper_cvtqg (void)
888

  
889
/* Floating point format conversion */
890
uint64_t helper_cvtts (uint64_t a)
930 891
{
931
    union {
932
        double d;
933
        uint64_t u;
934
    } p;
892
    float64 fa;
893
    float32 fr;
935 894

  
936
    p.d = FT0;
937
    FT0 = ieee64_to_vaxg(p.u);
895
    fa = t_to_float64(a);
896
    fr = float64_to_float32(fa, &FP_STATUS);
897
    return float32_to_s(fr);
938 898
}
939 899

  
940
void helper_cvtdg (void)
900
uint64_t helper_cvtst (uint64_t a)
941 901
{
942
    /* XXX: TODO */
902
    float32 fa;
903
    float64 fr;
904

  
905
    fa = s_to_float32(a);
906
    fr = float32_to_float64(fa, &FP_STATUS);
907
    return float64_to_t(fr);
943 908
}
944 909

  
945
void helper_cvtlq (void)
910
uint64_t helper_cvtqs (uint64_t a)
946 911
{
947
    union {
948
        double d;
949
        uint64_t u;
950
    } p, q;
951

  
952
    p.d = FT0;
953
    q.u = (p.u >> 29) & 0x3FFFFFFF;
954
    q.u |= (p.u >> 32);
955
    q.u = (int64_t)((int32_t)q.u);
956
    FT0 = q.d;
912
    float32 fr = int64_to_float32(a, &FP_STATUS);
913
    return float32_to_s(fr);
957 914
}
958 915

  
959
static always_inline void __helper_cvtql (int s, int v)
916
uint64_t helper_cvttq (uint64_t a)
960 917
{
961
    union {
962
        double d;
963
        uint64_t u;
964
    } p, q;
918
    float64 fa = t_to_float64(a);
919
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
920
}
965 921

  
966
    p.d = FT0;
967
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
968
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
969
    FT0 = q.d;
970
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
971
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
972
    }
973
    if (s) {
974
        /* TODO */
975
    }
922
uint64_t helper_cvtqt (uint64_t a)
923
{
924
    float64 fr = int64_to_float64(a, &FP_STATUS);
925
    return float64_to_t(fr);
976 926
}
977 927

  
978
void helper_cvtql (void)
928
uint64_t helper_cvtqf (uint64_t a)
979 929
{
980
    __helper_cvtql(0, 0);
930
    float32 fr = int64_to_float32(a, &FP_STATUS);
931
    return float32_to_f(fr);
981 932
}
982 933

  
983
void helper_cvtqlv (void)
934
uint64_t helper_cvtgf (uint64_t a)
984 935
{
985
    __helper_cvtql(0, 1);
936
    float64 fa;
937
    float32 fr;
938

  
939
    fa = g_to_float64(a);
940
    fr = float64_to_float32(fa, &FP_STATUS);
941
    return float32_to_f(fr);
986 942
}
987 943

  
988
void helper_cvtqlsv (void)
944
uint64_t helper_cvtgq (uint64_t a)
989 945
{
990
    __helper_cvtql(1, 1);
946
    float64 fa = g_to_float64(a);
947
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
991 948
}
992 949

  
993
void helper_cmpfeq (void)
950
uint64_t helper_cvtqg (uint64_t a)
994 951
{
995
    if (float64_eq(FT0, FT1, &FP_STATUS))
996
        T0 = 1;
997
    else
998
        T0 = 0;
952
    float64 fr;
953
    fr = int64_to_float64(a, &FP_STATUS);
954
    return float64_to_g(fr);
999 955
}
1000 956

  
1001
void helper_cmpfne (void)
957
uint64_t helper_cvtlq (uint64_t a)
1002 958
{
1003
    if (float64_eq(FT0, FT1, &FP_STATUS))
1004
        T0 = 0;
1005
    else
1006
        T0 = 1;
959
    return (int64_t)((int32_t)(((a >> 32) & 0xC0000000) | ((a >> 29) & 0x3FFFFFFF)));
1007 960
}
1008 961

  
1009
void helper_cmpflt (void)
962
static always_inline uint64_t __helper_cvtql (uint64_t a, int s, int v)
1010 963
{
1011
    if (float64_lt(FT0, FT1, &FP_STATUS))
1012
        T0 = 1;
1013
    else
1014
        T0 = 0;
964
    uint64_t r;
965

  
966
    r = ((uint64_t)(a & 0xC0000000)) << 32;
967
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;
968

  
969
    if (v && (int64_t)((int32_t)a) != (int64_t)a) {
970
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
971
    }
972
    if (s) {
973
        /* TODO */
974
    }
975
    return r;
1015 976
}
1016 977

  
1017
void helper_cmpfle (void)
978
uint64_t helper_cvtql (uint64_t a)
1018 979
{
1019
    if (float64_lt(FT0, FT1, &FP_STATUS))
1020
        T0 = 1;
1021
    else
1022
        T0 = 0;
980
    return __helper_cvtql(a, 0, 0);
1023 981
}
1024 982

  
1025
void helper_cmpfgt (void)
983
uint64_t helper_cvtqlv (uint64_t a)
1026 984
{
1027
    if (float64_le(FT0, FT1, &FP_STATUS))
1028
        T0 = 0;
1029
    else
1030
        T0 = 1;
985
    return __helper_cvtql(a, 0, 1);
1031 986
}
1032 987

  
1033
void helper_cmpfge (void)
988
uint64_t helper_cvtqlsv (uint64_t a)
1034 989
{
1035
    if (float64_lt(FT0, FT1, &FP_STATUS))
1036
        T0 = 0;
1037
    else
1038
        T0 = 1;
990
    return __helper_cvtql(a, 1, 1);
1039 991
}
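
The longword-in-FP-register format handled by the cvtlq/cvtql helpers keeps bits 31:30 of the longword in register bits 63:62 and bits 29:0 in register bits 58:29. A minimal sketch of that packing and unpacking, with made-up names and independent of the helpers above:

#include <stdint.h>

static uint64_t demo_pack_longword(uint32_t lw)
{
    return (((uint64_t)(lw & 0xC0000000)) << 32) |
           (((uint64_t)(lw & 0x3FFFFFFF)) << 29);
}

static uint32_t demo_unpack_longword(uint64_t reg)
{
    return ((reg >> 32) & 0xC0000000) | ((reg >> 29) & 0x3FFFFFFF);
}
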
1040 992

  
1041 993
#if !defined (CONFIG_USER_ONLY)
......
1053 1005
}
1054 1006
#endif
1055 1007

  
1056
#if defined(HOST_SPARC) || defined(HOST_SPARC64)
1057
void helper_reset_FT0 (void)
1058
{
1059
    FT0 = 0;
1060
}
1061

  
1062
void helper_reset_FT1 (void)
1063
{
1064
    FT1 = 0;
1065
}
1066

  
1067
void helper_reset_FT2 (void)
1068
{
1069
    FT2 = 0;
1070
}
1071
#endif
1072

  
1073 1008
/*****************************************************************************/
1074 1009
/* Softmmu support */
1075 1010
#if !defined (CONFIG_USER_ONLY)
