Revision 8bb6e981 target-alpha/op_helper.c
b/target-alpha/op_helper.c | ||
---|---|---|
22 | 22 |
#include "host-utils.h" |
23 | 23 |
#include "softfloat.h" |
24 | 24 |
|
25 |
#include "op_helper.h" |
|
26 |
|
|
27 | 25 |
/* Flush the whole TLB of the current CPU.
   NOTE(review): the second argument 1 presumably requests a full flush
   including "global" entries -- confirm against tlb_flush(). */
void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}
31 | 29 |
|
32 |
void cpu_dump_EA (target_ulong EA); |
|
33 |
void helper_print_mem_EA (target_ulong EA) |
|
34 |
{ |
|
35 |
cpu_dump_EA(EA); |
|
36 |
} |
|
37 |
|
|
38 | 30 |
/*****************************************************************************/ |
39 | 31 |
/* Exceptions processing helpers */ |
40 | 32 |
void helper_excp (int excp, int error) |
... | ... | |
990 | 982 |
return __helper_cvtql(a, 1, 1); |
991 | 983 |
} |
992 | 984 |
|
985 |
/* PALcode support special instructions */ |
|
993 | 986 |
#if !defined (CONFIG_USER_ONLY) |
994 |
void helper_mfpr (int iprn) |
|
987 |
/* HW_REI: return from PALcode.  Resume execution at the address saved
   in IPR_EXC_ADDR (low two bits stripped), keeping only bit 0 of
   EXC_ADDR afterwards (the PALmode flag on Alpha -- TODO confirm). */
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}
|
993 |
|
|
994 |
void helper_hw_ret (uint64_t a) |
|
995 |
{ |
|
996 |
env->pc = a & ~3; |
|
997 |
env->ipr[IPR_EXC_ADDR] = a & 1; |
|
998 |
/* XXX: re-enable interrupts and memory mapping */ |
|
999 |
} |
|
1000 |
|
|
1001 |
uint64_t helper_mfpr (int iprn, uint64_t val) |
|
1002 |
{ |
|
1003 |
uint64_t tmp; |
|
1004 |
|
|
1005 |
if (cpu_alpha_mfpr(env, iprn, &tmp) == 0) |
|
1006 |
val = tmp; |
|
1007 |
|
|
1008 |
return val; |
|
1009 |
} |
|
1010 |
|
|
1011 |
/* Write 'val' to internal processor register 'iprn'.
   The final out parameter of cpu_alpha_mtpr() (old value / result --
   TODO confirm) is not needed here, hence NULL. */
void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}
|
997 | 1015 |
|
998 |
if (cpu_alpha_mfpr(env, iprn, &val) == 0) |
|
999 |
T0 = val; |
|
1016 |
void helper_set_alt_mode (void) |
|
1017 |
{ |
|
1018 |
env->saved_mode = env->ps & 0xC; |
|
1019 |
env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC); |
|
1000 | 1020 |
} |
1001 | 1021 |
|
1002 |
void helper_mtpr (int iprn)
|
|
1022 |
/* Restore the PS mode bits (mask 0xC) saved by helper_set_alt_mode(). */
void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}
1026 |
|
|
1006 | 1027 |
#endif |
1007 | 1028 |
|
1008 | 1029 |
/*****************************************************************************/ |
... | ... | |
1013 | 1034 |
* Hopefully, we emulate the PALcode, then we should never see |
1014 | 1035 |
* HW_LD / HW_ST instructions. |
1015 | 1036 |
*/ |
1016 |
void helper_ld_phys_to_virt (void)
|
|
1037 |
/* Translate guest virtual address 'virtaddr' through the softmmu TLB
   for a READ access.  On a TLB miss the entry is filled (which may
   raise a guest exception) and the lookup retried.
   NOTE(review): the value returned is virtaddr plus the TLB addend,
   i.e. presumably a host pointer into guest RAM rather than a guest
   physical address -- confirm against the softmmu addend convention. */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB hit: apply this page's host/guest offset. */
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        /* GETPC() must be evaluated directly in this frame so
           tlb_fill() can unwind to the faulting translation block. */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
1037 | 1058 |
|
1038 |
void helper_st_phys_to_virt (void)
|
|
1059 |
/* Translate guest virtual address 'virtaddr' through the softmmu TLB
   for a WRITE access (checks addr_write; tlb_fill is called with
   is_write = 1).  Otherwise identical in structure to
   helper_ld_virt_to_phys(): fill-and-retry on a miss.
   NOTE(review): as with the load variant, the result is virtaddr plus
   the TLB addend -- presumably a host pointer, not a guest physical
   address. */
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB hit: apply this page's host/guest offset. */
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        /* GETPC() must be evaluated directly in this frame so
           tlb_fill() can unwind to the faulting translation block. */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
|
1080 |
|
|
1081 |
/* 32-bit load via the no-MMU ("raw") access macro.
   NOTE(review): t1 appears to be the guest address and t0 the load
   target -- confirm against the two-operand ldl_raw() definition. */
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}
|
1085 |
|
|
1086 |
/* 64-bit load via the no-MMU ("raw") access macro.
   NOTE(review): t1 appears to be the guest address and t0 the load
   target -- confirm against the two-operand ldq_raw() definition. */
void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}
|
1090 |
|
|
1091 |
/* LDL_L (load longword locked): record the address in env->lock --
   checked later by the store-conditional helpers -- then do the raw
   32-bit load. */
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}
|
1096 |
|
|
1097 |
void helper_ldq_l_raw(uint64_t t0, uint64_t t1) |
|
1098 |
{ |
|
1099 |
env->lock = t1; |
|
1100 |
ldl_raw(t1, t0); |
|
1101 |
} |
|
1102 |
|
|
1103 |
/* 32-bit load through the kernel-mode MMU index.
   NOTE(review): t1 appears to be the guest address and t0 the load
   target -- confirm against the two-operand ldl_kernel() macro. */
void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}
|
1107 |
|
|
1108 |
/* 64-bit load through the kernel-mode MMU index.
   NOTE(review): t1 appears to be the guest address and t0 the load
   target -- confirm against the two-operand ldq_kernel() macro. */
void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}
|
1112 |
|
|
1113 |
/* 32-bit load through the data-access MMU index.
   NOTE(review): t1 appears to be the guest address and t0 the load
   target -- confirm against the two-operand ldl_data() macro. */
void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}
|
1117 |
|
|
1118 |
/* 64-bit load through the data-access MMU index.
   NOTE(review): t1 appears to be the guest address and t0 the load
   target -- confirm against the two-operand ldq_data() macro. */
void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}
|
1122 |
|
|
1123 |
/* 32-bit store via the no-MMU ("raw") access macro: store t0 at
   guest address t1. */
void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}
|
1127 |
|
|
1128 |
/* 64-bit store via the no-MMU ("raw") access macro: store t0 at
   guest address t1. */
void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}
|
1132 |
|
|
1133 |
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1) |
|
1134 |
{ |
|
1135 |
uint64_t ret; |
|
1136 |
|
|
1137 |
if (t1 == env->lock) { |
|
1138 |
stl_raw(t1, t0); |
|
1139 |
ret = 0; |
|
1140 |
} else |
|
1141 |
ret = 1; |
|
1142 |
|
|
1143 |
env->lock = 1; |
|
1144 |
|
|
1145 |
return ret; |
|
1146 |
} |
|
1147 |
|
|
1148 |
uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1) |
|
1149 |
{ |
|
1150 |
uint64_t ret; |
|
1151 |
|
|
1152 |
if (t1 == env->lock) { |
|
1153 |
stq_raw(t1, t0); |
|
1154 |
ret = 0; |
|
1155 |
} else |
|
1156 |
ret = 1; |
|
1157 |
|
|
1158 |
env->lock = 1; |
|
1159 |
|
|
1160 |
return ret; |
|
1058 | 1161 |
} |
1059 | 1162 |
|
1060 | 1163 |
#define MMUSUFFIX _mmu |
Also available in: Unified diff