693 |
693 |
}
|
694 |
694 |
|
695 |
695 |
#if defined(TARGET_PPC64)
/* Return a pointer to the in-CPU copy of SLB entry 'nr'.
 *
 * The SLB is modelled as an array (env->slb[]) inside CPUPPCState, so
 * this simply indexes it.  The disabled block below sketches how a
 * "bridge mode" (SLB shadowed in guest memory at SPR_ASR) could reload
 * the entry from physical memory instead — left as an XXX.
 */
static ppc_slb_t *slb_get_entry(CPUPPCState *env, int nr)
{
    ppc_slb_t *retval = &env->slb[nr];

#if 0 // XXX implement bridge mode?
    if (env->spr[SPR_ASR] & 1) {
        target_phys_addr_t sr_base;

        /* Each shadowed entry is 12 bytes: a 64-bit word + a 32-bit word */
        sr_base = env->spr[SPR_ASR] & 0xfffffffffffff000;
        sr_base += (12 * nr);

        retval->tmp64 = ldq_phys(sr_base);
        retval->tmp = ldl_phys(sr_base + 8);
    }
#endif

    return retval;
}
715 |
/* Store '*slb' into SLB slot 'nr' of the CPU state.
 *
 * When 'slb' already points at the slot itself (the pointer returned by
 * slb_get_entry), the copy would be a self-assignment, so it is skipped.
 */
static void slb_set_entry(CPUPPCState *env, int nr, ppc_slb_t *slb)
{
    ppc_slb_t *entry = &env->slb[nr];

    if (slb == entry)
        return;

    entry->tmp64 = slb->tmp64;
    entry->tmp = slb->tmp;
}
|
726 |
/* Non-zero when the SLB entry's valid bit is set.
 *
 * In this packed layout the valid bit lives at bit 27 of the high
 * 64-bit word (mask 0x0000000008000000), so the cast to int cannot
 * truncate a set bit.
 */
static always_inline int slb_is_valid (ppc_slb_t *slb)
{
    return (int)(slb->tmp64 & 0x0000000008000000ULL);
}
730 |
|
701 |
|
static always_inline void slb_invalidate (uint64_t *slb64)
|
|
731 |
static always_inline void slb_invalidate (ppc_slb_t *slb)
|
702 |
732 |
{
|
703 |
|
*slb64 &= ~0x0000000008000000ULL;
|
|
733 |
slb->tmp64 &= ~0x0000000008000000ULL;
|
704 |
734 |
}
|
705 |
735 |
|
706 |
736 |
static always_inline int slb_lookup (CPUPPCState *env, target_ulong eaddr,
|
... | ... | |
708 |
738 |
target_ulong *page_mask, int *attr,
|
709 |
739 |
int *target_page_bits)
|
710 |
740 |
{
|
711 |
|
target_phys_addr_t sr_base;
|
712 |
741 |
target_ulong mask;
|
713 |
|
uint64_t tmp64;
|
714 |
|
uint32_t tmp;
|
715 |
742 |
int n, ret;
|
716 |
743 |
|
717 |
744 |
ret = -5;
|
718 |
|
sr_base = env->spr[SPR_ASR];
|
719 |
|
LOG_SLB("%s: eaddr " ADDRX " base " PADDRX "\n",
|
720 |
|
__func__, eaddr, sr_base);
|
|
745 |
LOG_SLB("%s: eaddr " ADDRX "\n", __func__, eaddr);
|
721 |
746 |
mask = 0x0000000000000000ULL; /* Avoid gcc warning */
|
722 |
747 |
for (n = 0; n < env->slb_nr; n++) {
|
723 |
|
tmp64 = ldq_phys(sr_base);
|
724 |
|
tmp = ldl_phys(sr_base + 8);
|
725 |
|
LOG_SLB("%s: seg %d " PADDRX " %016" PRIx64 " %08"
|
726 |
|
PRIx32 "\n", __func__, n, sr_base, tmp64, tmp);
|
727 |
|
if (slb_is_valid(tmp64)) {
|
|
748 |
ppc_slb_t *slb = slb_get_entry(env, n);
|
|
749 |
|
|
750 |
LOG_SLB("%s: seg %d %016" PRIx64 " %08"
|
|
751 |
PRIx32 "\n", __func__, n, slb->tmp64, slb->tmp);
|
|
752 |
if (slb_is_valid(slb)) {
|
728 |
753 |
/* SLB entry is valid */
|
729 |
|
if (tmp & 0x8) {
|
|
754 |
if (slb->tmp & 0x8) {
|
730 |
755 |
/* 1 TB Segment */
|
731 |
756 |
mask = 0xFFFF000000000000ULL;
|
732 |
757 |
if (target_page_bits)
|
... | ... | |
737 |
762 |
if (target_page_bits)
|
738 |
763 |
*target_page_bits = TARGET_PAGE_BITS;
|
739 |
764 |
}
|
740 |
|
if ((eaddr & mask) == (tmp64 & mask)) {
|
|
765 |
if ((eaddr & mask) == (slb->tmp64 & mask)) {
|
741 |
766 |
/* SLB match */
|
742 |
|
*vsid = ((tmp64 << 24) | (tmp >> 8)) & 0x0003FFFFFFFFFFFFULL;
|
|
767 |
*vsid = ((slb->tmp64 << 24) | (slb->tmp >> 8)) & 0x0003FFFFFFFFFFFFULL;
|
743 |
768 |
*page_mask = ~mask;
|
744 |
|
*attr = tmp & 0xFF;
|
|
769 |
*attr = slb->tmp & 0xFF;
|
745 |
770 |
ret = n;
|
746 |
771 |
break;
|
747 |
772 |
}
|
748 |
773 |
}
|
749 |
|
sr_base += 12;
|
750 |
774 |
}
|
751 |
775 |
|
752 |
776 |
return ret;
|
... | ... | |
754 |
778 |
|
755 |
779 |
void ppc_slb_invalidate_all (CPUPPCState *env)
|
756 |
780 |
{
|
757 |
|
target_phys_addr_t sr_base;
|
758 |
|
uint64_t tmp64;
|
759 |
781 |
int n, do_invalidate;
|
760 |
782 |
|
761 |
783 |
do_invalidate = 0;
|
762 |
|
sr_base = env->spr[SPR_ASR];
|
763 |
784 |
/* XXX: Warning: slbia never invalidates the first segment */
|
764 |
785 |
for (n = 1; n < env->slb_nr; n++) {
|
765 |
|
tmp64 = ldq_phys(sr_base);
|
766 |
|
if (slb_is_valid(tmp64)) {
|
767 |
|
slb_invalidate(&tmp64);
|
768 |
|
stq_phys(sr_base, tmp64);
|
|
786 |
ppc_slb_t *slb = slb_get_entry(env, n);
|
|
787 |
|
|
788 |
if (slb_is_valid(slb)) {
|
|
789 |
slb_invalidate(slb);
|
|
790 |
slb_set_entry(env, n, slb);
|
769 |
791 |
/* XXX: given the fact that segment size is 256 MB or 1TB,
|
770 |
792 |
* and we still don't have a tlb_flush_mask(env, n, mask)
|
771 |
793 |
* in Qemu, we just invalidate all TLBs
|
772 |
794 |
*/
|
773 |
795 |
do_invalidate = 1;
|
774 |
796 |
}
|
775 |
|
sr_base += 12;
|
776 |
797 |
}
|
777 |
798 |
if (do_invalidate)
|
778 |
799 |
tlb_flush(env, 1);
|
... | ... | |
780 |
801 |
|
781 |
802 |
void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0)
|
782 |
803 |
{
|
783 |
|
target_phys_addr_t sr_base;
|
784 |
804 |
target_ulong vsid, page_mask;
|
785 |
|
uint64_t tmp64;
|
786 |
805 |
int attr;
|
787 |
806 |
int n;
|
788 |
807 |
|
789 |
808 |
n = slb_lookup(env, T0, &vsid, &page_mask, &attr, NULL);
|
790 |
809 |
if (n >= 0) {
|
791 |
|
sr_base = env->spr[SPR_ASR];
|
792 |
|
sr_base += 12 * n;
|
793 |
|
tmp64 = ldq_phys(sr_base);
|
794 |
|
if (slb_is_valid(tmp64)) {
|
795 |
|
slb_invalidate(&tmp64);
|
796 |
|
stq_phys(sr_base, tmp64);
|
|
810 |
ppc_slb_t *slb = slb_get_entry(env, n);
|
|
811 |
|
|
812 |
if (slb_is_valid(slb)) {
|
|
813 |
slb_invalidate(slb);
|
|
814 |
slb_set_entry(env, n, slb);
|
797 |
815 |
/* XXX: given the fact that segment size is 256 MB or 1TB,
|
798 |
816 |
* and we still don't have a tlb_flush_mask(env, n, mask)
|
799 |
817 |
* in Qemu, we just invalidate all TLBs
|
... | ... | |
805 |
823 |
|
806 |
824 |
target_ulong ppc_load_slb (CPUPPCState *env, int slb_nr)
|
807 |
825 |
{
|
808 |
|
target_phys_addr_t sr_base;
|
809 |
826 |
target_ulong rt;
|
810 |
|
uint64_t tmp64;
|
811 |
|
uint32_t tmp;
|
812 |
|
|
813 |
|
sr_base = env->spr[SPR_ASR];
|
814 |
|
sr_base += 12 * slb_nr;
|
815 |
|
tmp64 = ldq_phys(sr_base);
|
816 |
|
tmp = ldl_phys(sr_base + 8);
|
817 |
|
if (tmp64 & 0x0000000008000000ULL) {
|
|
827 |
ppc_slb_t *slb = slb_get_entry(env, slb_nr);
|
|
828 |
|
|
829 |
if (slb_is_valid(slb)) {
|
818 |
830 |
/* SLB entry is valid */
|
819 |
831 |
/* Copy SLB bits 62:88 to Rt 37:63 (VSID 23:49) */
|
820 |
|
rt = tmp >> 8; /* 65:88 => 40:63 */
|
821 |
|
rt |= (tmp64 & 0x7) << 24; /* 62:64 => 37:39 */
|
|
832 |
rt = slb->tmp >> 8; /* 65:88 => 40:63 */
|
|
833 |
rt |= (slb->tmp64 & 0x7) << 24; /* 62:64 => 37:39 */
|
822 |
834 |
/* Copy SLB bits 89:92 to Rt 33:36 (KsKpNL) */
|
823 |
|
rt |= ((tmp >> 4) & 0xF) << 27;
|
|
835 |
rt |= ((slb->tmp >> 4) & 0xF) << 27;
|
824 |
836 |
} else {
|
825 |
837 |
rt = 0;
|
826 |
838 |
}
|
827 |
|
LOG_SLB("%s: " PADDRX " %016" PRIx64 " %08" PRIx32 " => %d "
|
828 |
|
ADDRX "\n", __func__, sr_base, tmp64, tmp, slb_nr, rt);
|
|
839 |
LOG_SLB("%s: %016" PRIx64 " %08" PRIx32 " => %d "
|
|
840 |
ADDRX "\n", __func__, slb->tmp64, slb->tmp, slb_nr, rt);
|
829 |
841 |
|
830 |
842 |
return rt;
|
831 |
843 |
}
|
832 |
844 |
|
833 |
845 |
void ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs)
|
834 |
846 |
{
|
835 |
|
target_phys_addr_t sr_base;
|
836 |
|
uint64_t tmp64;
|
837 |
|
uint32_t tmp;
|
|
847 |
ppc_slb_t *slb;
|
838 |
848 |
|
839 |
849 |
uint64_t vsid;
|
840 |
850 |
uint64_t esid;
|
... | ... | |
847 |
857 |
valid = (rb & (1 << 27));
|
848 |
858 |
slb_nr = rb & 0xfff;
|
849 |
859 |
|
850 |
|
tmp64 = (esid << 28) | valid | (vsid >> 24);
|
851 |
|
tmp = (vsid << 8) | (flags << 3);
|
852 |
|
|
853 |
|
/* Write SLB entry to memory */
|
854 |
|
sr_base = env->spr[SPR_ASR];
|
855 |
|
sr_base += 12 * slb_nr;
|
|
860 |
slb = slb_get_entry(env, slb_nr);
|
|
861 |
slb->tmp64 = (esid << 28) | valid | (vsid >> 24);
|
|
862 |
slb->tmp = (vsid << 8) | (flags << 3);
|
856 |
863 |
|
857 |
|
LOG_SLB("%s: %d " ADDRX " - " ADDRX " => " PADDRX " %016" PRIx64
|
|
864 |
LOG_SLB("%s: %d " ADDRX " - " ADDRX " => %016" PRIx64
|
858 |
865 |
" %08" PRIx32 "\n", __func__,
|
859 |
|
slb_nr, rb, rs, sr_base, tmp64, tmp);
|
|
866 |
slb_nr, rb, rs, tmp64, tmp);
|
860 |
867 |
|
861 |
|
stq_phys(sr_base, tmp64);
|
862 |
|
stl_phys(sr_base + 8, tmp);
|
|
868 |
slb_set_entry(env, slb_nr, slb);
|
863 |
869 |
}
|
864 |
870 |
#endif /* defined(TARGET_PPC64) */
|
865 |
871 |
|