Revision bffe1431 tcg/sparc/tcg-target.c

--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -491,11 +491,28 @@
 };
 #endif
 
+#if TARGET_LONG_BITS == 32
+#define TARGET_LD_OP LDUW
+#else
+#define TARGET_LD_OP LDX
+#endif
+
+#ifdef __arch64__
+#define HOST_LD_OP LDX
+#define HOST_ST_OP STX
+#define HOST_SLL_OP SHIFT_SLLX
+#define HOST_SRA_OP SHIFT_SRAX
+#else
+#define HOST_LD_OP LDUW
+#define HOST_ST_OP STW
+#define HOST_SLL_OP SHIFT_SLL
+#define HOST_SRA_OP SHIFT_SRA
+#endif
+
 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                             int opc)
 {
     int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
-    int target_ld_op, host_ld_op, sll_op, sra_op;
 #if defined(CONFIG_SOFTMMU)
     uint32_t *label1_ptr, *label2_ptr;
 #endif
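
The new block replaces per-call local variables with preprocessor macros: the guest load opcode and the host load/store/shift opcodes are fixed once TARGET_LONG_BITS and __arch64__ are known, so they become compile-time constants instead of values recomputed on every code-generation call. A minimal sketch of the same selection pattern, using made-up stand-in opcode values rather than QEMU's real encodings:

    #include <stdio.h>

    /* Hypothetical stand-in opcodes; QEMU's real LDUW/LDX encodings differ. */
    #define OP_LDUW 0x01  /* 32-bit zero-extending load */
    #define OP_LDX  0x02  /* 64-bit load */

    #define TARGET_LONG_BITS 64  /* assumption: a 64-bit guest */

    #if TARGET_LONG_BITS == 32
    #define TARGET_LD_OP OP_LDUW
    #else
    #define TARGET_LD_OP OP_LDX
    #endif

    int main(void)
    {
        /* The opcode is a constant; no per-call branching remains. */
        printf("guest load opcode: %#x\n", TARGET_LD_OP);
        return 0;
    }
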
@@ -509,23 +526,6 @@
     arg1 = TCG_REG_O1;
     arg2 = TCG_REG_O2;
 
-#if TARGET_LONG_BITS == 32
-    target_ld_op = LDUW;
-#else
-    target_ld_op = LDX;
-#endif
-
-#ifdef __arch64__
-    host_ld_op = LDX;
-    sll_op = SHIFT_SLLX;
-    sra_op = SHIFT_SRAX;
-#else
-    host_ld_op = LDUW;
-    sll_op = SHIFT_SLL;
-    sra_op = SHIFT_SRA;
-#endif
-
-
 #if defined(CONFIG_SOFTMMU)
     /* srl addr_reg, x, arg1 */
     tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
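
The srl by TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS is the software-TLB lookup: the page number selects a TLB entry, and shifting by fewer bits than the full page shift leaves the index pre-scaled by the entry size, so after masking it can be added to the TLB base directly. A rough sketch of the arithmetic with assumed constants (real targets define their own):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed example constants for illustration only. */
    #define TARGET_PAGE_BITS   12              /* 4 KiB pages */
    #define CPU_TLB_BITS       8               /* 256 TLB entries */
    #define CPU_TLB_ENTRY_BITS 4               /* 16-byte entries */
    #define CPU_TLB_SIZE       (1 << CPU_TLB_BITS)

    int main(void)
    {
        uint64_t addr = 0x12345678;

        /* Shift right by (page bits - entry bits): the page number arrives
           already multiplied by the entry size, i.e. as a byte offset. */
        uint64_t off = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
                       & ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

        /* Equivalent two-step form: index first, then scale. */
        uint64_t idx = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        printf("offset %#llx equals idx %#llx * 16: %d\n",
               (unsigned long long)off, (unsigned long long)idx,
               off == (idx << CPU_TLB_ENTRY_BITS));
        return 0;
    }
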
@@ -545,7 +545,7 @@
     tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
 
     /* ld [arg1], arg2 */
-    tcg_out32(s, target_ld_op | INSN_RD(arg2) | INSN_RS1(arg1) |
+    tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
               INSN_RS2(TCG_REG_G0));
 
     /* subcc arg0, arg2, %g0 */
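
tcg_out32 emits one 32-bit instruction word; INSN_RD, INSN_RS1 and INSN_RS2 place register numbers in the fixed bit fields of the SPARC encoding. A toy illustration of the field packing, assuming the usual SPARC format-3 positions (rd at bit 25, rs1 at bit 14, rs2 in the low bits); the base opcode value here is a placeholder, not a real encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed SPARC format-3 register field positions. */
    #define INSN_RD(x)  ((uint32_t)(x) << 25)
    #define INSN_RS1(x) ((uint32_t)(x) << 14)
    #define INSN_RS2(x) ((uint32_t)(x))

    #define FAKE_LD_OP  0xc0000000u  /* placeholder load opcode bits */

    int main(void)
    {
        int rd = 10, rs1 = 9, rs2 = 0;  /* e.g. %o2, %o1, %g0 */
        uint32_t insn = FAKE_LD_OP | INSN_RD(rd) | INSN_RS1(rs1)
                        | INSN_RS2(rs2);
        printf("encoded word: %#010x\n", (unsigned)insn);
        return 0;
    }
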
@@ -559,37 +559,45 @@
     /* mov (delay slot) */
     tcg_out_mov(s, arg0, addr_reg);
 
+    /* mov */
+    tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+
     /* XXX: move that code at the end of the TB */
     /* qemu_ld_helper[s_bits](arg0, arg1) */
     tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                            - (tcg_target_ulong)s->code_ptr) >> 2)
                          & 0x3fffffff));
-    /* mov (delay slot) */
-    tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+    /* Store AREG0 in stack to avoid ugly glibc bugs that mangle
+       global registers */
+    // delay slot
+    tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+                 TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_ST_OP);
+    tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+                 TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_LD_OP);
 
     /* data_reg = sign_extend(arg0) */
     switch(opc) {
     case 0 | 4:
         /* sll arg0, 24/56, data_reg */
         tcg_out_arithi(s, data_reg, arg0, (int)sizeof(tcg_target_long) * 8 - 8,
-                       sll_op);
+                       HOST_SLL_OP);
         /* sra data_reg, 24/56, data_reg */
         tcg_out_arithi(s, data_reg, data_reg,
-                       (int)sizeof(tcg_target_long) * 8 - 8, sra_op);
+                       (int)sizeof(tcg_target_long) * 8 - 8, HOST_SRA_OP);
         break;
     case 1 | 4:
         /* sll arg0, 16/48, data_reg */
         tcg_out_arithi(s, data_reg, arg0,
-                       (int)sizeof(tcg_target_long) * 8 - 16, sll_op);
+                       (int)sizeof(tcg_target_long) * 8 - 16, HOST_SLL_OP);
         /* sra data_reg, 16/48, data_reg */
         tcg_out_arithi(s, data_reg, data_reg,
-                       (int)sizeof(tcg_target_long) * 8 - 16, sra_op);
+                       (int)sizeof(tcg_target_long) * 8 - 16, HOST_SRA_OP);
         break;
     case 2 | 4:
         /* sll arg0, 32, data_reg */
-        tcg_out_arithi(s, data_reg, arg0, 32, sll_op);
+        tcg_out_arithi(s, data_reg, arg0, 32, HOST_SLL_OP);
         /* sra data_reg, 32, data_reg */
-        tcg_out_arithi(s, data_reg, data_reg, 32, sra_op);
+        tcg_out_arithi(s, data_reg, data_reg, 32, HOST_SRA_OP);
         break;
     case 0:
     case 1:
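
The sll/sra pair sign-extends an 8-, 16- or 32-bit helper result to the host register width (the 24/56 comments give the shift count on a 32- and 64-bit host respectively): shift the value to the top of the register, then shift it back arithmetically so the sign bit is replicated. The same idiom in C, on the common assumption that right-shifting a signed integer is arithmetic (implementation-defined in ISO C, but what every mainstream compiler does):

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend the low `bits` bits of x to 64 bits via a shift pair,
       mirroring the emitted sll/sra sequence on a 64-bit host. */
    static int64_t sign_extend(uint64_t x, int bits)
    {
        int shift = 64 - bits;                /* 56, 48 or 32 in the patch */
        return ((int64_t)(x << shift)) >> shift;
    }

    int main(void)
    {
        printf("%lld\n", (long long)sign_extend(0xff, 8));        /* -1 */
        printf("%lld\n", (long long)sign_extend(0x8000, 16));     /* -32768 */
        printf("%lld\n", (long long)sign_extend(0x7fffffff, 32)); /* 2147483647 */
        return 0;
    }
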
@@ -616,7 +624,7 @@
 
     /* ld [arg1 + x], arg1 */
     tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
-                 offsetof(CPUTLBEntry, addr_read), host_ld_op);
+                 offsetof(CPUTLBEntry, addr_read), HOST_LD_OP);
     /* add addr_reg, arg1, arg0 */
     tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
 #else
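
At this point arg1 points at the addr_read field of the matching CPUTLBEntry, so the entry's addend is fetched with a displacement of offsetof(addend) - offsetof(addr_read). A self-contained sketch of that relative-offset trick, with a made-up stand-in for CPUTLBEntry (the real layout lives in QEMU's headers):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for CPUTLBEntry. */
    typedef struct {
        uintptr_t addr_read;
        uintptr_t addr_write;
        uintptr_t addend;
    } FakeTLBEntry;

    int main(void)
    {
        FakeTLBEntry e = { .addr_read = 0x1000, .addend = 0xdeadbeef };

        /* p points at addr_read, as arg1 does after the TLB compare. */
        char *p = (char *)&e.addr_read;

        /* Reach addend without knowing where the entry itself starts. */
        uintptr_t addend = *(uintptr_t *)(p
            + (offsetof(FakeTLBEntry, addend)
               - offsetof(FakeTLBEntry, addr_read)));

        printf("addend: %#lx\n", (unsigned long)addend);
        return 0;
    }
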
@@ -693,7 +701,6 @@
                             int opc)
 {
     int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
-    int target_ld_op, host_ld_op;
 #if defined(CONFIG_SOFTMMU)
     uint32_t *label1_ptr, *label2_ptr;
 #endif
@@ -708,18 +715,6 @@
     arg1 = TCG_REG_O1;
     arg2 = TCG_REG_O2;
 
-#if TARGET_LONG_BITS == 32
-    target_ld_op = LDUW;
-#else
-    target_ld_op = LDX;
-#endif
-
-#ifdef __arch64__
-    host_ld_op = LDX;
-#else
-    host_ld_op = LDUW;
-#endif
-
 #if defined(CONFIG_SOFTMMU)
     /* srl addr_reg, x, arg1 */
     tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
@@ -740,7 +735,7 @@
     tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
 
     /* ld [arg1], arg2 */
-    tcg_out32(s, target_ld_op | INSN_RD(arg2) | INSN_RS1(arg1) |
+    tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
               INSN_RS2(TCG_REG_G0));
 
     /* subcc arg0, arg2, %g0 */
@@ -757,13 +752,21 @@
     /* mov */
     tcg_out_mov(s, arg1, data_reg);
 
+    /* mov */
+    tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
+
     /* XXX: move that code at the end of the TB */
     /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
     tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[s_bits]
                            - (tcg_target_ulong)s->code_ptr) >> 2)
                          & 0x3fffffff));
-    /* mov (delay slot) */
-    tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
+    /* Store AREG0 in stack to avoid ugly glibc bugs that mangle
+       global registers */
+    // delay slot
+    tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+                 TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_ST_OP);
+    tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+                 TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_LD_OP);
 
     /* will become:
        ba label2 */
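
On SPARC the instruction after a CALL sits in the branch delay slot and executes before the callee, which is why the patch moves the mem_index move ahead of the call and parks the AREG0 spill in the slot instead; the matching reload runs after the helper returns. The CALL word itself encodes a signed 30-bit PC-relative word displacement, which is what the >> 2 and & 0x3fffffff compute. A sketch of that encoding (the CALL opcode is the architectural 01 pattern in the top two bits):

    #include <stdint.h>
    #include <stdio.h>

    #define CALL 0x40000000u  /* op = 01 in bits 31:30 */

    /* Encode a CALL from pc to target: a word (not byte) displacement,
       truncated to 30 bits. Mirrors the expression in the patch. */
    static uint32_t encode_call(uintptr_t target, uintptr_t pc)
    {
        return CALL | (uint32_t)(((target - pc) >> 2) & 0x3fffffff);
    }

    int main(void)
    {
        /* Example addresses; purely illustrative. */
        uintptr_t pc = 0x10000, helper = 0x12340;
        printf("call insn: %#010x\n", (unsigned)encode_call(helper, pc));
        /* disp30 = (0x12340 - 0x10000) / 4 = 0x8d0 */
        return 0;
    }
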
@@ -780,7 +783,7 @@
 
     /* ld [arg1 + x], arg1 */
     tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
-                 offsetof(CPUTLBEntry, addr_write), host_ld_op);
+                 offsetof(CPUTLBEntry, addr_write), HOST_LD_OP);
 
     /* add addr_reg, arg1, arg0 */
     tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
@@ -862,35 +865,23 @@
         s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
         break;
     case INDEX_op_call:
-        {
-            unsigned int st_op, ld_op;
-
-#ifdef __arch64__
-            st_op = STX;
-            ld_op = LDX;
-#else
-            st_op = STW;
-            ld_op = LDUW;
-#endif
-            if (const_args[0])
-                tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
-                                       - (tcg_target_ulong)s->code_ptr) >> 2)
-                                     & 0x3fffffff));
-            else {
-                tcg_out_ld_ptr(s, TCG_REG_I5,
-                               (tcg_target_long)(s->tb_next + args[0]));
-                tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
-                          INSN_RS2(TCG_REG_G0));
-            }
-            /* Store AREG0 in stack to avoid ugly glibc bugs that mangle
-               global registers */
-            tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
-                         TCG_TARGET_CALL_STACK_OFFSET - sizeof(long),
-                         st_op); // delay slot
-            tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
-                         TCG_TARGET_CALL_STACK_OFFSET - sizeof(long),
-                         ld_op);
+        if (const_args[0])
+            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
+                                   - (tcg_target_ulong)s->code_ptr) >> 2)
+                                 & 0x3fffffff));
+        else {
+            tcg_out_ld_ptr(s, TCG_REG_I5,
+                           (tcg_target_long)(s->tb_next + args[0]));
+            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
+                      INSN_RS2(TCG_REG_G0));
         }
+        /* Store AREG0 in stack to avoid ugly glibc bugs that mangle
+           global registers */
+        // delay slot
+        tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+                     TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_ST_OP);
+        tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+                     TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_LD_OP);
         break;
     case INDEX_op_jmp:
     case INDEX_op_br:
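
The INDEX_op_call path now reuses the HOST_ST_OP/HOST_LD_OP macros instead of a per-case local block, and applies the same discipline as the qemu_ld/qemu_st paths: spill AREG0 (the host register pinned to the CPU env pointer) to a stack slot in the call's delay slot, then reload it right after, so a libc that clobbers SPARC global registers cannot corrupt it. The pattern, reduced to plain C with ordinary variables standing in for the pinned register and its stack slot:

    #include <stdio.h>

    static long env_reg = 0x1234;  /* stands in for AREG0, a pinned register */

    /* A callee that tramples the "register", as a buggy libc might. */
    static void helper(void) { env_reg = -1; }

    int main(void)
    {
        long spill_slot;        /* stands in for the slot at
                                   TCG_TARGET_CALL_STACK_OFFSET - sizeof(long) */

        spill_slot = env_reg;   /* store AREG0 (emitted in the delay slot) */
        helper();               /* the call that may mangle global registers */
        env_reg = spill_slot;   /* reload AREG0 after the call returns */

        printf("env_reg preserved: %#lx\n", env_reg);  /* 0x1234 again */
        return 0;
    }
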
