 static TCGv_i64 cpu_T64[3];
 #endif
 static TCGv_i64 cpu_FT[2];
-static TCGv_i64 cpu_AVRh[3], cpu_AVRl[3];

 #include "gen-icount.h"

...
     cpu_FT[1] = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, ft1), "FT1");

-    cpu_AVRh[0] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr0.u64[0]), "AVR0H");
-    cpu_AVRl[0] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr0.u64[1]), "AVR0L");
-    cpu_AVRh[1] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr1.u64[0]), "AVR1H");
-    cpu_AVRl[1] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr1.u64[1]), "AVR1L");
-    cpu_AVRh[2] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr2.u64[0]), "AVR2H");
-    cpu_AVRl[2] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr2.u64[1]), "AVR2L");
-
     p = cpu_reg_names;

     for (i = 0; i < 8; i++) {
...
         p += (i < 10) ? 4 : 5;

         sprintf(p, "avr%dH", i);
+#ifdef WORDS_BIGENDIAN
+        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
+                                             offsetof(CPUState, avr[i].u64[0]), p);
+#else
         cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, avr[i].u64[0]), p);
+                                             offsetof(CPUState, avr[i].u64[1]), p);
+#endif
         p += (i < 10) ? 6 : 7;

         sprintf(p, "avr%dL", i);
+#ifdef WORDS_BIGENDIAN
         cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, avr[i].u64[1]), p);
+                                             offsetof(CPUState, avr[i].u64[1]), p);
+#else
+        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
+                                             offsetof(CPUState, avr[i].u64[0]), p);
+#endif
         p += (i < 10) ? 6 : 7;
     }

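The #ifdef WORDS_BIGENDIAN added above reflects how CPUState stores each 128-bit AltiVec register: as two 64-bit words, with the architecturally high quadword in avr[i].u64[0] on big-endian hosts and in avr[i].u64[1] on little-endian ones. The name strides are also easy to miss: "avr0H" plus its NUL terminator occupies 6 bytes of cpu_reg_names, while "avr10H" and up occupy 7, hence p += (i < 10) ? 6 : 7;. Below is a minimal standalone sketch of the layout convention, assuming a union shaped like QEMU's ppc_avr_t of this era; the AVR_HI/AVR_LO names are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the relevant shape of QEMU's ppc_avr_t: one 128-bit AltiVec
     * register kept as two 64-bit words inside CPUState. */
    typedef union {
        uint64_t u64[2];
    } avr_reg;

    /* WORDS_BIGENDIAN describes the *host*. Per the registration loop in
     * the hunk above, the architecturally high quadword lives in u64[0]
     * on big-endian hosts and in u64[1] on little-endian hosts. */
    #ifdef WORDS_BIGENDIAN
    enum { AVR_HI = 0, AVR_LO = 1 };   /* hypothetical helper names */
    #else
    enum { AVR_HI = 1, AVR_LO = 0 };
    #endif

    /* Store a guest vector given as its two architectural halves. */
    static void set_avr(avr_reg *r, uint64_t hi, uint64_t lo)
    {
        r->u64[AVR_HI] = hi;
        r->u64[AVR_LO] = lo;
    }

    int main(void)
    {
        avr_reg r;
        set_avr(&r, 0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL);
        printf("u64[0]=%016llx u64[1]=%016llx\n",
               (unsigned long long)r.u64[0], (unsigned long long)r.u64[1]);
        return 0;
    }

Registering every half as a named TCG global is what lets the load/store macros further down operate on cpu_avrh[]/cpu_avrl[] directly, instead of staging values through the fixed AVR0H..AVR2L scratch globals deleted in the previous hunk.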
...
 /*** Altivec vector extension ***/
 /* Altivec register moves */

-static always_inline void gen_load_avr(int t, int reg) {
-    tcg_gen_mov_i64(cpu_AVRh[t], cpu_avrh[reg]);
-    tcg_gen_mov_i64(cpu_AVRl[t], cpu_avrl[reg]);
-}
-
-static always_inline void gen_store_avr(int reg, int t) {
-    tcg_gen_mov_i64(cpu_avrh[reg], cpu_AVRh[t]);
-    tcg_gen_mov_i64(cpu_avrl[reg], cpu_AVRl[t]);
-}
-
-#define op_vr_ldst(name) (*gen_op_##name[ctx->mem_idx])()
-#define OP_VR_LD_TABLE(name)                                                  \
-static GenOpFunc *gen_op_vr_l##name[NB_MEM_FUNCS] = {                         \
-    GEN_MEM_FUNCS(vr_l##name),                                                \
-};
-#define OP_VR_ST_TABLE(name)                                                  \
-static GenOpFunc *gen_op_vr_st##name[NB_MEM_FUNCS] = {                        \
-    GEN_MEM_FUNCS(vr_st##name),                                               \
-};
-
 #define GEN_VR_LDX(name, opc2, opc3)                                          \
-GEN_HANDLER(l##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)               \
+GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)                  \
 {                                                                             \
+    TCGv EA;                                                                  \
     if (unlikely(!ctx->altivec_enabled)) {                                    \
         GEN_EXCP_NO_VR(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    gen_addr_reg_index(cpu_T[0], ctx);                                        \
-    op_vr_ldst(vr_l##name);                                                   \
-    gen_store_avr(rD(ctx->opcode), 0);                                        \
+    EA = tcg_temp_new();                                                      \
+    gen_addr_reg_index(EA, ctx);                                              \
+    tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
+    if (ctx->mem_idx & 1) {                                                   \
+        gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);           \
+        tcg_gen_addi_tl(EA, EA, 8);                                           \
+        gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);           \
+    } else {                                                                  \
+        gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);           \
+        tcg_gen_addi_tl(EA, EA, 8);                                           \
+        gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);           \
+    }                                                                         \
+    tcg_temp_free(EA);                                                        \
 }

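The rewritten GEN_VR_LDX expands an lvx-style load inline: the effective address is computed into a fresh temporary, forced to a 16-byte boundary with tcg_gen_andi_tl(EA, EA, ~0xf) (lvx ignores the low four address bits), and the two halves are fetched with gen_qemu_ld64. At this point in the code's history the low bit of ctx->mem_idx carries the guest's little-endian mode, so the ctx->mem_idx & 1 branch loads the halves in swapped order to keep cpu_avrh/cpu_avrl architecturally consistent. Here is a plain-C model of the same addressing logic; guest_mem, load_u64 and lvx_model are stand-ins invented for this sketch, not QEMU APIs:

    #include <stdint.h>
    #include <string.h>

    static uint8_t guest_mem[64];              /* toy guest memory */

    static uint64_t load_u64(uint64_t ea)      /* models one gen_qemu_ld64 */
    {
        uint64_t v;
        memcpy(&v, guest_mem + ea, sizeof(v));
        return v;
    }

    static void lvx_model(uint64_t ra_plus_rb, int le_mode,
                          uint64_t *avrh, uint64_t *avrl)
    {
        uint64_t ea = ra_plus_rb & ~(uint64_t)0xf; /* drop EA's low 4 bits */
        if (le_mode) {                             /* ctx->mem_idx & 1 */
            *avrl = load_u64(ea);
            *avrh = load_u64(ea + 8);
        } else {
            *avrh = load_u64(ea);
            *avrl = load_u64(ea + 8);
        }
    }

    int main(void)
    {
        uint64_t hi, lo;
        lvx_model(0x17, 0, &hi, &lo);   /* EA 0x17 is truncated to 0x10 */
        (void)hi; (void)lo;
        return 0;
    }

The store macro below emits the mirror image of this sequence with gen_qemu_st64.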
 #define GEN_VR_STX(name, opc2, opc3)                                          \
 GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)              \
 {                                                                             \
+    TCGv EA;                                                                  \
     if (unlikely(!ctx->altivec_enabled)) {                                    \
         GEN_EXCP_NO_VR(ctx);                                                  \
         return;                                                               \
     }                                                                         \
-    gen_addr_reg_index(cpu_T[0], ctx);                                        \
-    gen_load_avr(0, rS(ctx->opcode));                                         \
-    op_vr_ldst(vr_st##name);                                                  \
+    EA = tcg_temp_new();                                                      \
+    gen_addr_reg_index(EA, ctx);                                              \
+    tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
+    if (ctx->mem_idx & 1) {                                                   \
+        gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);           \
+        tcg_gen_addi_tl(EA, EA, 8);                                           \
+        gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);           \
+    } else {                                                                  \
+        gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);           \
+        tcg_gen_addi_tl(EA, EA, 8);                                           \
+        gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);           \
+    }                                                                         \
+    tcg_temp_free(EA);                                                        \
 }

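One detail worth flagging in GEN_VR_STX: the new body indexes the source vector with rD(ctx->opcode) even though a store reads rS, and the removed code did use gen_load_avr(0, rS(ctx->opcode)). This is harmless: in this translator both helpers extract the same five bits, (opcode >> 21) & 0x1f, because the vD and vS fields of the PowerPC encoding occupy the same position; rS would merely be the clearer spelling. A sketch of the equivalence, with the extractors written out instead of generated by translate.c's EXTRACT_HELPER macro:

    #include <stdint.h>
    #include <assert.h>

    /* Both extract instruction bits 21..25 (counting from the LSB). */
    static inline int rD(uint32_t opcode) { return (opcode >> 21) & 0x1f; }
    static inline int rS(uint32_t opcode) { return (opcode >> 21) & 0x1f; }

    int main(void)
    {
        const uint32_t samples[] = { 0x00000000u, 0x7C0011CEu, 0xFFFFFFFFu };
        for (int i = 0; i < 3; i++)
            assert(rD(samples[i]) == rS(samples[i]));
        return 0;
    }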
-OP_VR_LD_TABLE(vx);
-GEN_VR_LDX(vx, 0x07, 0x03);
+GEN_VR_LDX(lvx, 0x07, 0x03);
 /* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
-#define gen_op_vr_lvxl gen_op_vr_lvx
-GEN_VR_LDX(vxl, 0x07, 0x0B);
+GEN_VR_LDX(lvxl, 0x07, 0x0B);

-OP_VR_ST_TABLE(vx);
-GEN_VR_STX(vx, 0x07, 0x07);
+GEN_VR_STX(svx, 0x07, 0x07);
 /* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
-#define gen_op_vr_stvxl gen_op_vr_stvx
-GEN_VR_STX(vxl, 0x07, 0x0F);
+GEN_VR_STX(svxl, 0x07, 0x0F);

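With the gen_op dispatch tables gone, the "least recently used" variants no longer need the #define aliases (gen_op_vr_lvxl, gen_op_vr_stvxl): since QEMU does not model the data cache, lvxl and stvxl become second instantiations of the same TCG expansion under their own opcodes (opc3 0x0B and 0x0F). Note the macro prefixes when reading the new invocations: GEN_VR_LDX pastes its argument verbatim, so it now takes the full mnemonic (lvx, lvxl), while GEN_VR_STX still prepends st, which is why it is invoked with svx/svxl (and therefore emits handlers named stsvx/stsvxl).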
 /*** SPE extension ***/
 /* Register moves */
|