root / target-xtensa / translate.c @ 1de7afc9
History | View | Annotate | Download (98.5 kB)
1 |
/*
 * Xtensa ISA:
 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
 *
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
|
30 |
|
31 |
#include <stdio.h> |
32 |
|
33 |
#include "cpu.h" |
34 |
#include "exec/exec-all.h" |
35 |
#include "disas/disas.h" |
36 |
#include "tcg-op.h" |
37 |
#include "qemu/log.h" |
38 |
#include "sysemu.h" |
39 |
|
40 |
#include "helper.h" |
41 |
#define GEN_HELPER 1 |
42 |
#include "helper.h" |
43 |
|
44 |
/*
 * Per-translation-block disassembly state, threaded through all gen_*
 * functions while one TB is being translated.
 */
typedef struct DisasContext {
    const XtensaConfig *config;     /* CPU configuration being translated for */
    TranslationBlock *tb;           /* TB under construction */
    uint32_t pc;                    /* PC of the instruction being translated */
    uint32_t next_pc;               /* PC of the following instruction */
    int cring;                      /* current ring (0 when PS.EXCM set) */
    int ring;                       /* ring from PS.RING */
    uint32_t lbeg;                  /* loop begin address (LBEG snapshot) */
    uint32_t lend;                  /* loop end address (LEND snapshot) */
    TCGv_i32 litbase;               /* masked LITBASE, see init_litbase() */
    int is_jmp;                     /* DISAS_* exit status for the TB loop */
    int singlestep_enabled;         /* gdbstub single-stepping active */

    /* SAR value tracking, used to pick cheaper shift code sequences */
    bool sar_5bit;                  /* SAR known to be a 5-bit right-shift */
    bool sar_m32_5bit;              /* SAR known to be 32 - sar_m32 */
    bool sar_m32_allocated;         /* sar_m32 temp has been allocated */
    TCGv_i32 sar_m32;               /* original 5-bit amount for left shifts */

    uint32_t ccount_delta;          /* cycles not yet added to CCOUNT */
    unsigned used_window;           /* highest window quarter checked so far */

    bool debug;                     /* debug option enabled for this TB */
    bool icount;                    /* ICOUNT-based debug counting active */
    TCGv_i32 next_icount;           /* ICOUNT value after this instruction */

    unsigned cpenable;              /* CPENABLE snapshot from tb->flags */
} DisasContext;
71 |
|
72 |
/* TCG global variables mirroring CPUXtensaState fields; set up once in
 * xtensa_translate_init(). */
static TCGv_ptr cpu_env;            /* pointer to CPUXtensaState */
static TCGv_i32 cpu_pc;             /* program counter */
static TCGv_i32 cpu_R[16];          /* address registers a0..a15 (window) */
static TCGv_i32 cpu_FR[16];         /* FP registers f0..f15 */
static TCGv_i32 cpu_SR[256];        /* special registers (sparse) */
static TCGv_i32 cpu_UR[256];        /* user registers (sparse) */
|
79 |
#include "exec/gen-icount.h" |
80 |
|
81 |
/*
 * Static description of one special/user register: its name, the config
 * option bits that must be enabled for it to exist, and the RSR/WSR/XSR
 * access kinds it permits.
 */
typedef struct XtensaReg {
    const char *name;               /* NULL means "not implemented" */
    uint64_t opt_bits;              /* required XTENSA_OPTION_BIT() mask */
    enum {
        SR_R = 1,                   /* readable via RSR */
        SR_W = 2,                   /* writable via WSR */
        SR_X = 4,                   /* exchangeable via XSR */
        SR_RW = 3,
        SR_RWX = 7,
    } access;
} XtensaReg;
92 |
|
93 |
/* Register tied to a single config option, with an explicit access mask. */
#define XTENSA_REG_ACCESS(regname, opt, acc) { \
        .name = (regname), \
        .opt_bits = XTENSA_OPTION_BIT(opt), \
        .access = (acc), \
    }

/* Common case: register tied to one option, full RSR/WSR/XSR access. */
#define XTENSA_REG(regname, opt) XTENSA_REG_ACCESS(regname, opt, SR_RWX)

/* Register gated by a raw option *bit mask* (e.g. XTENSA_OPTION_ALL)
 * rather than a single option index. */
#define XTENSA_REG_BITS(regname, opt) { \
        .name = (regname), \
        .opt_bits = (opt), \
        .access = SR_RWX, \
    }
106 |
|
107 |
/*
 * Special-register table, indexed by SR number.  Entries left zero
 * (name == NULL) are unimplemented; gen_check_sr() raises an illegal
 * instruction exception for them.
 */
static const XtensaReg sregnames[256] = {
    [LBEG] = XTENSA_REG("LBEG", XTENSA_OPTION_LOOP),
    [LEND] = XTENSA_REG("LEND", XTENSA_OPTION_LOOP),
    [LCOUNT] = XTENSA_REG("LCOUNT", XTENSA_OPTION_LOOP),
    [SAR] = XTENSA_REG_BITS("SAR", XTENSA_OPTION_ALL),
    [BR] = XTENSA_REG("BR", XTENSA_OPTION_BOOLEAN),
    [LITBASE] = XTENSA_REG("LITBASE", XTENSA_OPTION_EXTENDED_L32R),
    [SCOMPARE1] = XTENSA_REG("SCOMPARE1", XTENSA_OPTION_CONDITIONAL_STORE),
    [ACCLO] = XTENSA_REG("ACCLO", XTENSA_OPTION_MAC16),
    [ACCHI] = XTENSA_REG("ACCHI", XTENSA_OPTION_MAC16),
    [MR] = XTENSA_REG("MR0", XTENSA_OPTION_MAC16),
    [MR + 1] = XTENSA_REG("MR1", XTENSA_OPTION_MAC16),
    [MR + 2] = XTENSA_REG("MR2", XTENSA_OPTION_MAC16),
    [MR + 3] = XTENSA_REG("MR3", XTENSA_OPTION_MAC16),
    [WINDOW_BASE] = XTENSA_REG("WINDOW_BASE", XTENSA_OPTION_WINDOWED_REGISTER),
    [WINDOW_START] = XTENSA_REG("WINDOW_START",
            XTENSA_OPTION_WINDOWED_REGISTER),
    [PTEVADDR] = XTENSA_REG("PTEVADDR", XTENSA_OPTION_MMU),
    [RASID] = XTENSA_REG("RASID", XTENSA_OPTION_MMU),
    [ITLBCFG] = XTENSA_REG("ITLBCFG", XTENSA_OPTION_MMU),
    [DTLBCFG] = XTENSA_REG("DTLBCFG", XTENSA_OPTION_MMU),
    [IBREAKENABLE] = XTENSA_REG("IBREAKENABLE", XTENSA_OPTION_DEBUG),
    [CACHEATTR] = XTENSA_REG("CACHEATTR", XTENSA_OPTION_CACHEATTR),
    [ATOMCTL] = XTENSA_REG("ATOMCTL", XTENSA_OPTION_ATOMCTL),
    [IBREAKA] = XTENSA_REG("IBREAKA0", XTENSA_OPTION_DEBUG),
    [IBREAKA + 1] = XTENSA_REG("IBREAKA1", XTENSA_OPTION_DEBUG),
    [DBREAKA] = XTENSA_REG("DBREAKA0", XTENSA_OPTION_DEBUG),
    [DBREAKA + 1] = XTENSA_REG("DBREAKA1", XTENSA_OPTION_DEBUG),
    [DBREAKC] = XTENSA_REG("DBREAKC0", XTENSA_OPTION_DEBUG),
    [DBREAKC + 1] = XTENSA_REG("DBREAKC1", XTENSA_OPTION_DEBUG),
    [EPC1] = XTENSA_REG("EPC1", XTENSA_OPTION_EXCEPTION),
    [EPC1 + 1] = XTENSA_REG("EPC2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 2] = XTENSA_REG("EPC3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 3] = XTENSA_REG("EPC4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 4] = XTENSA_REG("EPC5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 5] = XTENSA_REG("EPC6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPC1 + 6] = XTENSA_REG("EPC7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [DEPC] = XTENSA_REG("DEPC", XTENSA_OPTION_EXCEPTION),
    [EPS2] = XTENSA_REG("EPS2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 1] = XTENSA_REG("EPS3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 2] = XTENSA_REG("EPS4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 3] = XTENSA_REG("EPS5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 4] = XTENSA_REG("EPS6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EPS2 + 5] = XTENSA_REG("EPS7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1] = XTENSA_REG("EXCSAVE1", XTENSA_OPTION_EXCEPTION),
    [EXCSAVE1 + 1] = XTENSA_REG("EXCSAVE2",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 2] = XTENSA_REG("EXCSAVE3",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 3] = XTENSA_REG("EXCSAVE4",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 4] = XTENSA_REG("EXCSAVE5",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 5] = XTENSA_REG("EXCSAVE6",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [EXCSAVE1 + 6] = XTENSA_REG("EXCSAVE7",
            XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
    [CPENABLE] = XTENSA_REG("CPENABLE", XTENSA_OPTION_COPROCESSOR),
    [INTSET] = XTENSA_REG_ACCESS("INTSET", XTENSA_OPTION_INTERRUPT, SR_RW),
    [INTCLEAR] = XTENSA_REG_ACCESS("INTCLEAR", XTENSA_OPTION_INTERRUPT, SR_W),
    [INTENABLE] = XTENSA_REG("INTENABLE", XTENSA_OPTION_INTERRUPT),
    [PS] = XTENSA_REG_BITS("PS", XTENSA_OPTION_ALL),
    [VECBASE] = XTENSA_REG("VECBASE", XTENSA_OPTION_RELOCATABLE_VECTOR),
    [EXCCAUSE] = XTENSA_REG("EXCCAUSE", XTENSA_OPTION_EXCEPTION),
    [DEBUGCAUSE] = XTENSA_REG_ACCESS("DEBUGCAUSE", XTENSA_OPTION_DEBUG, SR_R),
    [CCOUNT] = XTENSA_REG("CCOUNT", XTENSA_OPTION_TIMER_INTERRUPT),
    [PRID] = XTENSA_REG_ACCESS("PRID", XTENSA_OPTION_PROCESSOR_ID, SR_R),
    [ICOUNT] = XTENSA_REG("ICOUNT", XTENSA_OPTION_DEBUG),
    [ICOUNTLEVEL] = XTENSA_REG("ICOUNTLEVEL", XTENSA_OPTION_DEBUG),
    [EXCVADDR] = XTENSA_REG("EXCVADDR", XTENSA_OPTION_EXCEPTION),
    [CCOMPARE] = XTENSA_REG("CCOMPARE0", XTENSA_OPTION_TIMER_INTERRUPT),
    [CCOMPARE + 1] = XTENSA_REG("CCOMPARE1",
            XTENSA_OPTION_TIMER_INTERRUPT),
    [CCOMPARE + 2] = XTENSA_REG("CCOMPARE2",
            XTENSA_OPTION_TIMER_INTERRUPT),
    [MISC] = XTENSA_REG("MISC0", XTENSA_OPTION_MISC_SR),
    [MISC + 1] = XTENSA_REG("MISC1", XTENSA_OPTION_MISC_SR),
    [MISC + 2] = XTENSA_REG("MISC2", XTENSA_OPTION_MISC_SR),
    [MISC + 3] = XTENSA_REG("MISC3", XTENSA_OPTION_MISC_SR),
};
187 |
|
188 |
/* User-register table, indexed by UR number; same conventions as sregnames. */
static const XtensaReg uregnames[256] = {
    [THREADPTR] = XTENSA_REG("THREADPTR", XTENSA_OPTION_THREAD_POINTER),
    [FCR] = XTENSA_REG("FCR", XTENSA_OPTION_FP_COPROCESSOR),
    [FSR] = XTENSA_REG("FSR", XTENSA_OPTION_FP_COPROCESSOR),
};
193 |
|
194 |
/*
 * One-time TCG setup: register env, pc, the AR/FR register files and every
 * configured special/user register as TCG globals backed by CPUXtensaState.
 * Only registers with a name in sregnames/uregnames are registered; the rest
 * stay NULL in cpu_SR/cpu_UR.
 */
void xtensa_translate_init(void)
{
    static const char * const regnames[] = {
        "ar0", "ar1", "ar2", "ar3",
        "ar4", "ar5", "ar6", "ar7",
        "ar8", "ar9", "ar10", "ar11",
        "ar12", "ar13", "ar14", "ar15",
    };
    static const char * const fregnames[] = {
        "f0", "f1", "f2", "f3",
        "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11",
        "f12", "f13", "f14", "f15",
    };
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
            offsetof(CPUXtensaState, pc), "pc");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUXtensaState, regs[i]),
                regnames[i]);
    }

    for (i = 0; i < 16; i++) {
        cpu_FR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUXtensaState, fregs[i]),
                fregnames[i]);
    }

    for (i = 0; i < 256; ++i) {
        if (sregnames[i].name) {
            cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                    offsetof(CPUXtensaState, sregs[i]),
                    sregnames[i].name);
        }
    }

    for (i = 0; i < 256; ++i) {
        if (uregnames[i].name) {
            cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                    offsetof(CPUXtensaState, uregs[i]),
                    uregnames[i].name);
        }
    }
/* Second expansion of helper.h registers the helper functions with TCG. */
#define GEN_HELPER 2
#include "helper.h"
}
244 |
|
245 |
/* True if any of the option bits in 'opt' are enabled in this CPU config. */
static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
{
    return xtensa_option_bits_enabled(dc->config, opt);
}
249 |
|
250 |
/* True if the single config option 'opt' is enabled in this CPU config. */
static inline bool option_enabled(DisasContext *dc, int opt)
{
    return xtensa_option_enabled(dc->config, opt);
}
254 |
|
255 |
/*
 * If extended-L32R addressing is active for this TB, cache the page-aligned
 * LITBASE in a local temp so L32R can use it without re-masking each time.
 */
static void init_litbase(DisasContext *dc)
{
    if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
        dc->litbase = tcg_temp_local_new_i32();
        tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
    }
}
262 |
|
263 |
/* Release the temp allocated by init_litbase(), if any. */
static void reset_litbase(DisasContext *dc)
{
    if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
        tcg_temp_free(dc->litbase);
    }
}
269 |
|
270 |
/* Start a TB with no knowledge about the SAR contents. */
static void init_sar_tracker(DisasContext *dc)
{
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
    dc->sar_m32_allocated = false;
}
276 |
|
277 |
/* Release the sar_m32 temp if gen_left_shift_sar() allocated it. */
static void reset_sar_tracker(DisasContext *dc)
{
    if (dc->sar_m32_allocated) {
        tcg_temp_free(dc->sar_m32);
    }
}
283 |
|
284 |
/*
 * SSR/SSA8L-style setup: SAR = sa & 0x1f.  Records that SAR now holds a
 * plain 5-bit right-shift amount so later shifts can skip the 64-bit path.
 */
static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
    if (dc->sar_m32_5bit) {
        /* Stale left-shift mirror; discard its contents. */
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = true;
    dc->sar_m32_5bit = false;
}
293 |
|
294 |
/*
 * SSL/SSA8B-style setup: SAR = 32 - (sa & 0x1f).  Keeps the original 5-bit
 * amount in dc->sar_m32 so left shifts can use it directly.
 */
static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    TCGv_i32 tmp = tcg_const_i32(32);
    if (!dc->sar_m32_allocated) {
        /* Local temp: must survive across branches within the TB. */
        dc->sar_m32 = tcg_temp_local_new_i32();
        dc->sar_m32_allocated = true;
    }
    tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
    tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
    dc->sar_5bit = false;
    dc->sar_m32_5bit = true;
    tcg_temp_free(tmp);
}
307 |
|
308 |
/*
 * Flush the accumulated cycle count into CCOUNT via the advance_ccount
 * helper.  Must be called before anything that may observe CCOUNT or leave
 * the TB.  Resets the delta so repeated calls are cheap no-ops.
 */
static void gen_advance_ccount(DisasContext *dc)
{
    if (dc->ccount_delta > 0) {
        TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
        dc->ccount_delta = 0;
        gen_helper_advance_ccount(cpu_env, tmp);
        tcg_temp_free(tmp);
    }
}
317 |
|
318 |
/* Invalidate the window-overflow check cache (after WINDOW_BASE/PS writes). */
static void reset_used_window(DisasContext *dc)
{
    dc->used_window = 0;
}
322 |
|
323 |
/* Raise a QEMU-internal exception (e.g. EXCP_DEBUG) via helper. */
static void gen_exception(DisasContext *dc, int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_advance_ccount(dc);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free(tmp);
}
330 |
|
331 |
/*
 * Raise an architectural exception with the given EXCCAUSE value at the
 * current PC.  For causes that unconditionally end translation (illegal
 * insn, syscall) the TB is terminated.
 */
static void gen_exception_cause(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_exception_cause(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
            cause == SYSCALL_CAUSE) {
        dc->is_jmp = DISAS_UPDATE;
    }
}
344 |
|
345 |
/* Like gen_exception_cause(), but also passes the faulting virtual address
 * (recorded in EXCVADDR by the helper). */
static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
        TCGv_i32 vaddr)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
}
355 |
|
356 |
/*
 * Raise a debug exception with the given DEBUGCAUSE bits.  Causes that
 * redirect execution (instruction break, break insns) end the TB.
 */
static void gen_debug_exception(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_debug_exception(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
        dc->is_jmp = DISAS_UPDATE;
    }
}
368 |
|
369 |
/* Privileged instruction guard: in a non-zero ring, raise PRIVILEGED_CAUSE
 * and end the TB. */
static void gen_check_privilege(DisasContext *dc)
{
    if (dc->cring) {
        gen_exception_cause(dc, PRIVILEGED_CAUSE);
        dc->is_jmp = DISAS_UPDATE;
    }
}
376 |
|
377 |
/*
 * Coprocessor guard: if the coprocessor option is configured and bit 'cp'
 * of CPENABLE (snapshotted in dc->cpenable) is clear, raise the matching
 * "coprocessor N disabled" exception and end the TB.
 */
static void gen_check_cpenable(DisasContext *dc, unsigned cp)
{
    if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) &&
            !(dc->cpenable & (1 << cp))) {
        gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp);
        dc->is_jmp = DISAS_UPDATE;
    }
}
385 |
|
386 |
/*
 * Emit a jump to 'dest'.  slot >= 0 selects goto_tb chaining through that
 * TB slot; slot < 0 forces a full exit.  Single-stepping exits via
 * EXCP_DEBUG instead of chaining.  Always terminates the TB.
 */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    gen_advance_ccount(dc);
    if (dc->icount) {
        tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
    }
    if (dc->singlestep_enabled) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            /* Exit value encodes (tb, slot) for TB chaining. */
            tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
    dc->is_jmp = DISAS_UPDATE;
}
405 |
|
406 |
/* Jump to a computed destination; never chainable (slot -1). */
static void gen_jump(DisasContext *dc, TCGv dest)
{
    gen_jump_slot(dc, dest, -1);
}
410 |
|
411 |
/*
 * Jump to an immediate destination.  TB chaining (via 'slot') is only
 * legal when source and destination share a guest page; otherwise fall
 * back to a full TB exit.
 */
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
    TCGv_i32 target = tcg_const_i32(dest);
    bool same_page = ((dc->pc ^ dest) & TARGET_PAGE_MASK) == 0;

    gen_jump_slot(dc, target, same_page ? slot : -1);
    tcg_temp_free(target);
}
420 |
|
421 |
/*
 * Windowed call: record the window increment in PS.CALLINC, store the
 * return address (with the window-increment tag in bits 31:30) into the
 * callee-visible return register a(callinc*4), then jump.
 */
static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
        int slot)
{
    TCGv_i32 tcallinc = tcg_const_i32(callinc);

    tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
            tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
    tcg_temp_free(tcallinc);
    /* Return register index is callinc * 4 (a4/a8/a12 for CALL4/8/12). */
    tcg_gen_movi_i32(cpu_R[callinc << 2],
            (callinc << 30) | (dc->next_pc & 0x3fffffff));
    gen_jump_slot(dc, dest, slot);
}
433 |
|
434 |
/* Windowed call to a computed destination; never chainable (slot -1). */
static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
{
    gen_callw_slot(dc, callinc, dest, -1);
}
438 |
|
439 |
/*
 * Windowed call to an immediate destination.  As with gen_jumpi(), TB
 * chaining is only permitted within the same guest page.
 */
static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
{
    TCGv_i32 target = tcg_const_i32(dest);
    bool same_page = ((dc->pc ^ dest) & TARGET_PAGE_MASK) == 0;

    gen_callw_slot(dc, callinc, target, same_page ? slot : -1);
    tcg_temp_free(target);
}
448 |
|
449 |
/*
 * Zero-overhead loop handling: if the next PC is the loop end (and loops
 * are enabled and we are not in exception mode), emit code that either
 * decrements LCOUNT and branches back to LBEG, or falls through to the
 * next instruction when LCOUNT is zero.  Returns true if loop code was
 * emitted (the TB is then finished).
 */
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
    if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
            !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
            dc->next_pc == dc->lend) {
        int label = gen_new_label();

        gen_advance_ccount(dc);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
        tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
        gen_jumpi(dc, dc->lbeg, slot);
        gen_set_label(label);
        gen_jumpi(dc, dc->next_pc, -1);
        return true;
    }
    return false;
}
466 |
|
467 |
/*
 * Continue at the next PC, honouring zero-overhead loop-end semantics.
 * If gen_check_loop_end() already emitted the loop epilogue there is
 * nothing more to do; otherwise emit a plain jump to the next insn.
 */
static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
{
    if (gen_check_loop_end(dc, slot)) {
        return;
    }
    gen_jumpi(dc, dc->next_pc, slot);
}
473 |
|
474 |
/*
 * Conditional branch: if (t0 cond t1) jump to pc+offset (TB slot 1),
 * otherwise continue to the next instruction (slot 0, with loop-end
 * handling).  Ends the TB.
 */
static void gen_brcond(DisasContext *dc, TCGCond cond,
        TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
{
    int label = gen_new_label();

    gen_advance_ccount(dc);
    tcg_gen_brcond_i32(cond, t0, t1, label);
    gen_jumpi_check_loop_end(dc, 0);
    gen_set_label(label);
    gen_jumpi(dc, dc->pc + offset, 1);
}
485 |
|
486 |
/* gen_brcond() with an immediate second operand. */
static void gen_brcondi(DisasContext *dc, TCGCond cond,
        TCGv_i32 t0, uint32_t t1, uint32_t offset)
{
    TCGv_i32 tmp = tcg_const_i32(t1);
    gen_brcond(dc, cond, t0, tmp, offset);
    tcg_temp_free(tmp);
}
493 |
|
494 |
/*
 * Validate an RSR/WSR/XSR access to special register 'sr': the register
 * must exist in this configuration and allow the requested access kind
 * (one of SR_R/SR_W/SR_X).  On failure, log a message and raise an
 * illegal instruction exception.
 */
static void gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access)
{
    if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) {
        if (sregnames[sr].name) {
            qemu_log("SR %s is not configured\n", sregnames[sr].name);
        } else {
            qemu_log("SR %d is not implemented\n", sr);
        }
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
    } else if (!(sregnames[sr].access & access)) {
        static const char * const access_text[] = {
            [SR_R] = "rsr",
            [SR_W] = "wsr",
            [SR_X] = "xsr",
        };
        /* 'access' must be exactly one of SR_R/SR_W/SR_X here. */
        assert(access < ARRAY_SIZE(access_text) && access_text[access]);
        qemu_log("SR %s is not available for %s\n", sregnames[sr].name,
                access_text[access]);
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
    }
}
515 |
|
516 |
/* RSR CCOUNT: flush pending cycles first so the read sees the live count. */
static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    gen_advance_ccount(dc);
    tcg_gen_mov_i32(d, cpu_SR[sr]);
}
521 |
|
522 |
/* RSR PTEVADDR: combine the PTEVADDR base with EXCVADDR's VPN (vaddr >> 10),
 * clearing the low two bits to form the page-table entry address. */
static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
    tcg_gen_or_i32(d, d, cpu_SR[sr]);
    tcg_gen_andi_i32(d, d, 0xfffffffc);
}
528 |
|
529 |
/*
 * RSR dispatch: registers with read side effects go through a handler,
 * everything else is a plain move from the SR global.
 */
static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    static void (* const rsr_handler[256])(DisasContext *dc,
            TCGv_i32 d, uint32_t sr) = {
        [CCOUNT] = gen_rsr_ccount,
        [PTEVADDR] = gen_rsr_ptevaddr,
    };

    if (rsr_handler[sr]) {
        rsr_handler[sr](dc, d, sr);
    } else {
        tcg_gen_mov_i32(d, cpu_SR[sr]);
    }
}
543 |
|
544 |
/* WSR LBEG: delegate to helper (updates cached loop state), then end the TB
 * since loop bounds affect translation. */
static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lbeg(cpu_env, s);
    gen_jumpi_check_loop_end(dc, 0);
}
549 |
|
550 |
/* WSR LEND: delegate to helper, then end the TB (loop bounds changed). */
static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lend(cpu_env, s);
    gen_jumpi_check_loop_end(dc, 0);
}
555 |
|
556 |
/* WSR SAR: keep 6 bits; any cached SAR knowledge is now invalid. */
static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
}
565 |
|
566 |
/* WSR BR: boolean register file is 16 bits wide. */
static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
}
570 |
|
571 |
/* WSR LITBASE: keep the base page and the enable bit (bit 0). */
static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
577 |
|
578 |
/* WSR ACCHI: only 8 significant bits, sign-extended. */
static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_ext8s_i32(cpu_SR[sr], s);
}
582 |
|
583 |
/* WSR WINDOW_BASE: helper rotates the register window; cached overflow
 * checks are no longer valid. */
static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_windowbase(cpu_env, v);
    reset_used_window(dc);
}
588 |
|
589 |
/* WSR WINDOW_START: one valid bit per window quarter (nareg / 4 bits). */
static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
    reset_used_window(dc);
}
594 |
|
595 |
/* WSR PTEVADDR: only the page-table base (top 10 bits) is writable. */
static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
}
599 |
|
600 |
/* WSR RASID: helper updates ASID state. */
static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_rasid(cpu_env, v);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
606 |
|
607 |
/* WSR ITLBCFG/DTLBCFG: keep only the writable way-size fields. */
static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
}
611 |
|
612 |
/* WSR IBREAKENABLE: helper updates breakpoint state; end the TB so new
 * breakpoints take effect. */
static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_ibreakenable(cpu_env, v);
    gen_jumpi_check_loop_end(dc, 0);
}
617 |
|
618 |
/* WSR ATOMCTL: 6 writable bits. */
static void gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x3f);
}
622 |
|
623 |
/* WSR IBREAKA[n]: ignored unless breakpoint n is configured; helper updates
 * the address and the TB is ended so the new breakpoint is honoured. */
static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - IBREAKA;

    if (id < dc->config->nibreak) {
        TCGv_i32 tmp = tcg_const_i32(id);
        gen_helper_wsr_ibreaka(cpu_env, tmp, v);
        tcg_temp_free(tmp);
        gen_jumpi_check_loop_end(dc, 0);
    }
}
634 |
|
635 |
/* WSR DBREAKA[n]: ignored unless data breakpoint n is configured. */
static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - DBREAKA;

    if (id < dc->config->ndbreak) {
        TCGv_i32 tmp = tcg_const_i32(id);
        gen_helper_wsr_dbreaka(cpu_env, tmp, v);
        tcg_temp_free(tmp);
    }
}
645 |
|
646 |
/* WSR DBREAKC[n]: ignored unless data breakpoint n is configured. */
static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - DBREAKC;

    if (id < dc->config->ndbreak) {
        TCGv_i32 tmp = tcg_const_i32(id);
        gen_helper_wsr_dbreakc(cpu_env, tmp, v);
        tcg_temp_free(tmp);
    }
}
656 |
|
657 |
/* WSR CPENABLE: one bit per coprocessor (8 bits). */
static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xff);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
663 |
|
664 |
/* WSR INTSET: only software interrupt bits are directly writable; then
 * re-evaluate pending interrupts and end the TB. */
static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v,
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
671 |
|
672 |
/* WSR INTCLEAR: clear the requested edge/NMI/software bits from INTSET,
 * then re-evaluate pending interrupts. */
static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, v,
            dc->config->inttype_mask[INTTYPE_EDGE] |
            dc->config->inttype_mask[INTTYPE_NMI] |
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
    tcg_temp_free(tmp);
    gen_helper_check_interrupts(cpu_env);
}
684 |
|
685 |
/* WSR INTENABLE: store, re-evaluate pending interrupts, end the TB. */
static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_mov_i32(cpu_SR[sr], v);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
691 |
|
692 |
/*
 * WSR PS: mask to the architecturally writable PS fields (RING only with
 * the MMU option), invalidate window checks, re-check interrupts, and end
 * the TB because the MMU index / tb->flags may have changed.
 */
static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
        PS_UM | PS_EXCM | PS_INTLEVEL;

    if (option_enabled(dc, XTENSA_OPTION_MMU)) {
        mask |= PS_RING;
    }
    tcg_gen_andi_i32(cpu_SR[sr], v, mask);
    reset_used_window(dc);
    gen_helper_check_interrupts(cpu_env);
    /* This can change mmu index and tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
706 |
|
707 |
/* WSR ICOUNT: while icount stepping is active the live value is tracked in
 * dc->next_icount and committed at TB exit; otherwise write directly. */
static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    if (dc->icount) {
        tcg_gen_mov_i32(dc->next_icount, v);
    } else {
        tcg_gen_mov_i32(cpu_SR[sr], v);
    }
}
715 |
|
716 |
/* WSR ICOUNTLEVEL: 4-bit level. */
static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
722 |
|
723 |
/*
 * WSR CCOMPARE[n]: ignored unless timer n is configured.  Writing clears
 * the corresponding timer interrupt in INTSET, so flush CCOUNT and
 * re-evaluate pending interrupts.
 */
static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t id = sr - CCOMPARE;
    if (id < dc->config->nccompare) {
        uint32_t int_bit = 1 << dc->config->timerint[id];
        gen_advance_ccount(dc);
        tcg_gen_mov_i32(cpu_SR[sr], v);
        tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
        gen_helper_check_interrupts(cpu_env);
    }
}
734 |
|
735 |
/*
 * WSR dispatch: registers with write side effects or partial writability
 * go through their handler; everything else is a plain move into the SR
 * global.
 */
static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    static void (* const wsr_handler[256])(DisasContext *dc,
            uint32_t sr, TCGv_i32 v) = {
        [LBEG] = gen_wsr_lbeg,
        [LEND] = gen_wsr_lend,
        [SAR] = gen_wsr_sar,
        [BR] = gen_wsr_br,
        [LITBASE] = gen_wsr_litbase,
        [ACCHI] = gen_wsr_acchi,
        [WINDOW_BASE] = gen_wsr_windowbase,
        [WINDOW_START] = gen_wsr_windowstart,
        [PTEVADDR] = gen_wsr_ptevaddr,
        [RASID] = gen_wsr_rasid,
        [ITLBCFG] = gen_wsr_tlbcfg,
        [DTLBCFG] = gen_wsr_tlbcfg,
        [IBREAKENABLE] = gen_wsr_ibreakenable,
        [ATOMCTL] = gen_wsr_atomctl,
        [IBREAKA] = gen_wsr_ibreaka,
        [IBREAKA + 1] = gen_wsr_ibreaka,
        [DBREAKA] = gen_wsr_dbreaka,
        [DBREAKA + 1] = gen_wsr_dbreaka,
        [DBREAKC] = gen_wsr_dbreakc,
        [DBREAKC + 1] = gen_wsr_dbreakc,
        [CPENABLE] = gen_wsr_cpenable,
        [INTSET] = gen_wsr_intset,
        [INTCLEAR] = gen_wsr_intclear,
        [INTENABLE] = gen_wsr_intenable,
        [PS] = gen_wsr_ps,
        [ICOUNT] = gen_wsr_icount,
        [ICOUNTLEVEL] = gen_wsr_icountlevel,
        [CCOMPARE] = gen_wsr_ccompare,
        [CCOMPARE + 1] = gen_wsr_ccompare,
        [CCOMPARE + 2] = gen_wsr_ccompare,
    };

    if (wsr_handler[sr]) {
        wsr_handler[sr](dc, sr, s);
    } else {
        tcg_gen_mov_i32(cpu_SR[sr], s);
    }
}
777 |
|
778 |
/*
 * WUR dispatch for user registers: FCR goes through its helper, FSR keeps
 * only its defined bits, and any other user register takes the value
 * unchanged.
 */
static void gen_wur(uint32_t ur, TCGv_i32 s)
{
    if (ur == FCR) {
        gen_helper_wur_fcr(cpu_env, s);
    } else if (ur == FSR) {
        tcg_gen_andi_i32(cpu_UR[ur], s, 0xffffff80);
    } else {
        tcg_gen_mov_i32(cpu_UR[ur], s);
    }
}
794 |
|
795 |
/*
 * Enforce load/store alignment for an access of size (1 << shift) bytes.
 * Without the unaligned-exception option the address is silently masked
 * down; with it (and without hardware alignment, or when the insn forbids
 * it) a misaligned address raises LOAD_STORE_ALIGNMENT_CAUSE.
 * May modify 'addr' in place (masking case).
 */
static void gen_load_store_alignment(DisasContext *dc, int shift,
        TCGv_i32 addr, bool no_hw_alignment)
{
    if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
        tcg_gen_andi_i32(addr, addr, ~0 << shift);
    } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
            no_hw_alignment) {
        int label = gen_new_label();
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
        gen_set_label(label);
        tcg_temp_free(tmp);
    }
}
811 |
|
812 |
/* WAITI: flush CCOUNT, then call the waiti helper with the resume PC and
 * the new interrupt level. */
static void gen_waiti(DisasContext *dc, uint32_t imm4)
{
    TCGv_i32 pc = tcg_const_i32(dc->next_pc);
    TCGv_i32 intlevel = tcg_const_i32(imm4);
    gen_advance_ccount(dc);
    gen_helper_waiti(cpu_env, pc, intlevel);
    tcg_temp_free(pc);
    tcg_temp_free(intlevel);
}
821 |
|
822 |
/*
 * Register-window overflow check for a reference to AR register r1.
 * Skipped in exception mode.  Checks are cached per window quarter in
 * dc->used_window so each quarter is only validated once per TB.
 */
static void gen_window_check1(DisasContext *dc, unsigned r1)
{
    if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
        return;
    }
    if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
            r1 / 4 > dc->used_window) {
        TCGv_i32 pc = tcg_const_i32(dc->pc);
        TCGv_i32 w = tcg_const_i32(r1 / 4);

        dc->used_window = r1 / 4;
        gen_advance_ccount(dc);
        gen_helper_window_check(cpu_env, pc, w);

        tcg_temp_free(w);
        tcg_temp_free(pc);
    }
}
840 |
|
841 |
/* Window check for two AR references: only the higher index matters. */
static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
{
    unsigned highest = (r2 > r1) ? r2 : r1;

    gen_window_check1(dc, highest);
}
845 |
|
846 |
/* Window check for three AR references: reduce to the two-register case. */
static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
        unsigned r3)
{
    unsigned hi23 = (r3 > r2) ? r3 : r2;

    gen_window_check2(dc, r1, hi23);
}
851 |
|
852 |
/*
 * Extract a 16-bit MAC16 operand from 'v' into a fresh temp: the high
 * half (hi) or the low half, zero- or sign-extended per is_unsigned.
 * Caller owns (and must free) the returned temp.
 */
static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
{
    TCGv_i32 half = tcg_temp_new_i32();

    if (hi) {
        if (is_unsigned) {
            tcg_gen_shri_i32(half, v, 16);
        } else {
            tcg_gen_sari_i32(half, v, 16);
        }
    } else {
        if (is_unsigned) {
            tcg_gen_ext16u_i32(half, v);
        } else {
            tcg_gen_ext16s_i32(half, v);
        }
    }
    return half;
}
863 |
|
864 |
static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) |
865 |
{ |
866 |
#define HAS_OPTION_BITS(opt) do { \ |
867 |
if (!option_bits_enabled(dc, opt)) { \
|
868 |
qemu_log("Option is not enabled %s:%d\n", \
|
869 |
__FILE__, __LINE__); \ |
870 |
goto invalid_opcode; \
|
871 |
} \ |
872 |
} while (0) |
873 |
|
874 |
#define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
|
875 |
|
876 |
#define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__) |
877 |
#define RESERVED() do { \ |
878 |
qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
|
879 |
dc->pc, b0, b1, b2, __FILE__, __LINE__); \ |
880 |
goto invalid_opcode; \
|
881 |
} while (0) |
882 |
|
883 |
|
884 |
#ifdef TARGET_WORDS_BIGENDIAN
|
885 |
#define OP0 (((b0) & 0xf0) >> 4) |
886 |
#define OP1 (((b2) & 0xf0) >> 4) |
887 |
#define OP2 ((b2) & 0xf) |
888 |
#define RRR_R ((b1) & 0xf) |
889 |
#define RRR_S (((b1) & 0xf0) >> 4) |
890 |
#define RRR_T ((b0) & 0xf) |
891 |
#else
|
892 |
#define OP0 (((b0) & 0xf)) |
893 |
#define OP1 (((b2) & 0xf)) |
894 |
#define OP2 (((b2) & 0xf0) >> 4) |
895 |
#define RRR_R (((b1) & 0xf0) >> 4) |
896 |
#define RRR_S (((b1) & 0xf)) |
897 |
#define RRR_T (((b0) & 0xf0) >> 4) |
898 |
#endif
|
899 |
#define RRR_X ((RRR_R & 0x4) >> 2) |
900 |
#define RRR_Y ((RRR_T & 0x4) >> 2) |
901 |
#define RRR_W (RRR_R & 0x3) |
902 |
|
903 |
#define RRRN_R RRR_R
|
904 |
#define RRRN_S RRR_S
|
905 |
#define RRRN_T RRR_T
|
906 |
|
907 |
#define RRI8_R RRR_R
|
908 |
#define RRI8_S RRR_S
|
909 |
#define RRI8_T RRR_T
|
910 |
#define RRI8_IMM8 (b2)
|
911 |
#define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8) |
912 |
|
913 |
#ifdef TARGET_WORDS_BIGENDIAN
|
914 |
#define RI16_IMM16 (((b1) << 8) | (b2)) |
915 |
#else
|
916 |
#define RI16_IMM16 (((b2) << 8) | (b1)) |
917 |
#endif
|
918 |
|
919 |
#ifdef TARGET_WORDS_BIGENDIAN
|
920 |
#define CALL_N (((b0) & 0xc) >> 2) |
921 |
#define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2)) |
922 |
#else
|
923 |
#define CALL_N (((b0) & 0x30) >> 4) |
924 |
#define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10)) |
925 |
#endif
|
926 |
#define CALL_OFFSET_SE \
|
927 |
(((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET) |
928 |
|
929 |
#define CALLX_N CALL_N
|
930 |
#ifdef TARGET_WORDS_BIGENDIAN
|
931 |
#define CALLX_M ((b0) & 0x3) |
932 |
#else
|
933 |
#define CALLX_M (((b0) & 0xc0) >> 6) |
934 |
#endif
|
935 |
#define CALLX_S RRR_S
|
936 |
|
937 |
#define BRI12_M CALLX_M
|
938 |
#define BRI12_S RRR_S
|
939 |
#ifdef TARGET_WORDS_BIGENDIAN
|
940 |
#define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2)) |
941 |
#else
|
942 |
#define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4)) |
943 |
#endif
|
944 |
#define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12) |
945 |
|
946 |
#define BRI8_M BRI12_M
|
947 |
#define BRI8_R RRI8_R
|
948 |
#define BRI8_S RRI8_S
|
949 |
#define BRI8_IMM8 RRI8_IMM8
|
950 |
#define BRI8_IMM8_SE RRI8_IMM8_SE
|
951 |
|
952 |
#define RSR_SR (b1)
|
953 |
|
954 |
uint8_t b0 = cpu_ldub_code(env, dc->pc); |
955 |
uint8_t b1 = cpu_ldub_code(env, dc->pc + 1);
|
956 |
uint8_t b2 = 0;
|
957 |
|
958 |
static const uint32_t B4CONST[] = { |
959 |
0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 |
960 |
}; |
961 |
|
962 |
static const uint32_t B4CONSTU[] = { |
963 |
32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 |
964 |
}; |
965 |
|
966 |
if (OP0 >= 8) { |
967 |
dc->next_pc = dc->pc + 2;
|
968 |
HAS_OPTION(XTENSA_OPTION_CODE_DENSITY); |
969 |
} else {
|
970 |
dc->next_pc = dc->pc + 3;
|
971 |
b2 = cpu_ldub_code(env, dc->pc + 2);
|
972 |
} |
973 |
|
974 |
switch (OP0) {
|
975 |
case 0: /*QRST*/ |
976 |
switch (OP1) {
|
977 |
case 0: /*RST0*/ |
978 |
switch (OP2) {
|
979 |
case 0: /*ST0*/ |
980 |
if ((RRR_R & 0xc) == 0x8) { |
981 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
982 |
} |
983 |
|
984 |
switch (RRR_R) {
|
985 |
case 0: /*SNM0*/ |
986 |
switch (CALLX_M) {
|
987 |
case 0: /*ILL*/ |
988 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
989 |
break;
|
990 |
|
991 |
case 1: /*reserved*/ |
992 |
RESERVED(); |
993 |
break;
|
994 |
|
995 |
case 2: /*JR*/ |
996 |
switch (CALLX_N) {
|
997 |
case 0: /*RET*/ |
998 |
case 2: /*JX*/ |
999 |
gen_window_check1(dc, CALLX_S); |
1000 |
gen_jump(dc, cpu_R[CALLX_S]); |
1001 |
break;
|
1002 |
|
1003 |
case 1: /*RETWw*/ |
1004 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1005 |
{ |
1006 |
TCGv_i32 tmp = tcg_const_i32(dc->pc); |
1007 |
gen_advance_ccount(dc); |
1008 |
gen_helper_retw(tmp, cpu_env, tmp); |
1009 |
gen_jump(dc, tmp); |
1010 |
tcg_temp_free(tmp); |
1011 |
} |
1012 |
break;
|
1013 |
|
1014 |
case 3: /*reserved*/ |
1015 |
RESERVED(); |
1016 |
break;
|
1017 |
} |
1018 |
break;
|
1019 |
|
1020 |
case 3: /*CALLX*/ |
1021 |
gen_window_check2(dc, CALLX_S, CALLX_N << 2);
|
1022 |
switch (CALLX_N) {
|
1023 |
case 0: /*CALLX0*/ |
1024 |
{ |
1025 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1026 |
tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); |
1027 |
tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
|
1028 |
gen_jump(dc, tmp); |
1029 |
tcg_temp_free(tmp); |
1030 |
} |
1031 |
break;
|
1032 |
|
1033 |
case 1: /*CALLX4w*/ |
1034 |
case 2: /*CALLX8w*/ |
1035 |
case 3: /*CALLX12w*/ |
1036 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1037 |
{ |
1038 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1039 |
|
1040 |
tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); |
1041 |
gen_callw(dc, CALLX_N, tmp); |
1042 |
tcg_temp_free(tmp); |
1043 |
} |
1044 |
break;
|
1045 |
} |
1046 |
break;
|
1047 |
} |
1048 |
break;
|
1049 |
|
1050 |
case 1: /*MOVSPw*/ |
1051 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1052 |
gen_window_check2(dc, RRR_T, RRR_S); |
1053 |
{ |
1054 |
TCGv_i32 pc = tcg_const_i32(dc->pc); |
1055 |
gen_advance_ccount(dc); |
1056 |
gen_helper_movsp(cpu_env, pc); |
1057 |
tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]); |
1058 |
tcg_temp_free(pc); |
1059 |
} |
1060 |
break;
|
1061 |
|
1062 |
case 2: /*SYNC*/ |
1063 |
switch (RRR_T) {
|
1064 |
case 0: /*ISYNC*/ |
1065 |
break;
|
1066 |
|
1067 |
case 1: /*RSYNC*/ |
1068 |
break;
|
1069 |
|
1070 |
case 2: /*ESYNC*/ |
1071 |
break;
|
1072 |
|
1073 |
case 3: /*DSYNC*/ |
1074 |
break;
|
1075 |
|
1076 |
case 8: /*EXCW*/ |
1077 |
HAS_OPTION(XTENSA_OPTION_EXCEPTION); |
1078 |
break;
|
1079 |
|
1080 |
case 12: /*MEMW*/ |
1081 |
break;
|
1082 |
|
1083 |
case 13: /*EXTW*/ |
1084 |
break;
|
1085 |
|
1086 |
case 15: /*NOP*/ |
1087 |
break;
|
1088 |
|
1089 |
default: /*reserved*/ |
1090 |
RESERVED(); |
1091 |
break;
|
1092 |
} |
1093 |
break;
|
1094 |
|
1095 |
case 3: /*RFEIx*/ |
1096 |
switch (RRR_T) {
|
1097 |
case 0: /*RFETx*/ |
1098 |
HAS_OPTION(XTENSA_OPTION_EXCEPTION); |
1099 |
switch (RRR_S) {
|
1100 |
case 0: /*RFEx*/ |
1101 |
gen_check_privilege(dc); |
1102 |
tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); |
1103 |
gen_helper_check_interrupts(cpu_env); |
1104 |
gen_jump(dc, cpu_SR[EPC1]); |
1105 |
break;
|
1106 |
|
1107 |
case 1: /*RFUEx*/ |
1108 |
RESERVED(); |
1109 |
break;
|
1110 |
|
1111 |
case 2: /*RFDEx*/ |
1112 |
gen_check_privilege(dc); |
1113 |
gen_jump(dc, cpu_SR[ |
1114 |
dc->config->ndepc ? DEPC : EPC1]); |
1115 |
break;
|
1116 |
|
1117 |
case 4: /*RFWOw*/ |
1118 |
case 5: /*RFWUw*/ |
1119 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1120 |
gen_check_privilege(dc); |
1121 |
{ |
1122 |
TCGv_i32 tmp = tcg_const_i32(1);
|
1123 |
|
1124 |
tcg_gen_andi_i32( |
1125 |
cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); |
1126 |
tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); |
1127 |
|
1128 |
if (RRR_S == 4) { |
1129 |
tcg_gen_andc_i32(cpu_SR[WINDOW_START], |
1130 |
cpu_SR[WINDOW_START], tmp); |
1131 |
} else {
|
1132 |
tcg_gen_or_i32(cpu_SR[WINDOW_START], |
1133 |
cpu_SR[WINDOW_START], tmp); |
1134 |
} |
1135 |
|
1136 |
gen_helper_restore_owb(cpu_env); |
1137 |
gen_helper_check_interrupts(cpu_env); |
1138 |
gen_jump(dc, cpu_SR[EPC1]); |
1139 |
|
1140 |
tcg_temp_free(tmp); |
1141 |
} |
1142 |
break;
|
1143 |
|
1144 |
default: /*reserved*/ |
1145 |
RESERVED(); |
1146 |
break;
|
1147 |
} |
1148 |
break;
|
1149 |
|
1150 |
case 1: /*RFIx*/ |
1151 |
HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT); |
1152 |
if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) { |
1153 |
gen_check_privilege(dc); |
1154 |
tcg_gen_mov_i32(cpu_SR[PS], |
1155 |
cpu_SR[EPS2 + RRR_S - 2]);
|
1156 |
gen_helper_check_interrupts(cpu_env); |
1157 |
gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
|
1158 |
} else {
|
1159 |
qemu_log("RFI %d is illegal\n", RRR_S);
|
1160 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
1161 |
} |
1162 |
break;
|
1163 |
|
1164 |
case 2: /*RFME*/ |
1165 |
TBD(); |
1166 |
break;
|
1167 |
|
1168 |
default: /*reserved*/ |
1169 |
RESERVED(); |
1170 |
break;
|
1171 |
|
1172 |
} |
1173 |
break;
|
1174 |
|
1175 |
case 4: /*BREAKx*/ |
1176 |
HAS_OPTION(XTENSA_OPTION_DEBUG); |
1177 |
if (dc->debug) {
|
1178 |
gen_debug_exception(dc, DEBUGCAUSE_BI); |
1179 |
} |
1180 |
break;
|
1181 |
|
1182 |
case 5: /*SYSCALLx*/ |
1183 |
HAS_OPTION(XTENSA_OPTION_EXCEPTION); |
1184 |
switch (RRR_S) {
|
1185 |
case 0: /*SYSCALLx*/ |
1186 |
gen_exception_cause(dc, SYSCALL_CAUSE); |
1187 |
break;
|
1188 |
|
1189 |
case 1: /*SIMCALL*/ |
1190 |
if (semihosting_enabled) {
|
1191 |
gen_check_privilege(dc); |
1192 |
gen_helper_simcall(cpu_env); |
1193 |
} else {
|
1194 |
qemu_log("SIMCALL but semihosting is disabled\n");
|
1195 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
1196 |
} |
1197 |
break;
|
1198 |
|
1199 |
default:
|
1200 |
RESERVED(); |
1201 |
break;
|
1202 |
} |
1203 |
break;
|
1204 |
|
1205 |
case 6: /*RSILx*/ |
1206 |
HAS_OPTION(XTENSA_OPTION_INTERRUPT); |
1207 |
gen_check_privilege(dc); |
1208 |
gen_window_check1(dc, RRR_T); |
1209 |
tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]); |
1210 |
tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL); |
1211 |
tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S); |
1212 |
gen_helper_check_interrupts(cpu_env); |
1213 |
gen_jumpi_check_loop_end(dc, 0);
|
1214 |
break;
|
1215 |
|
1216 |
case 7: /*WAITIx*/ |
1217 |
HAS_OPTION(XTENSA_OPTION_INTERRUPT); |
1218 |
gen_check_privilege(dc); |
1219 |
gen_waiti(dc, RRR_S); |
1220 |
break;
|
1221 |
|
1222 |
case 8: /*ANY4p*/ |
1223 |
case 9: /*ALL4p*/ |
1224 |
case 10: /*ANY8p*/ |
1225 |
case 11: /*ALL8p*/ |
1226 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
1227 |
{ |
1228 |
const unsigned shift = (RRR_R & 2) ? 8 : 4; |
1229 |
TCGv_i32 mask = tcg_const_i32( |
1230 |
((1 << shift) - 1) << RRR_S); |
1231 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1232 |
|
1233 |
tcg_gen_and_i32(tmp, cpu_SR[BR], mask); |
1234 |
if (RRR_R & 1) { /*ALL*/ |
1235 |
tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
|
1236 |
} else { /*ANY*/ |
1237 |
tcg_gen_add_i32(tmp, tmp, mask); |
1238 |
} |
1239 |
tcg_gen_shri_i32(tmp, tmp, RRR_S + shift); |
1240 |
tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], |
1241 |
tmp, RRR_T, 1);
|
1242 |
tcg_temp_free(mask); |
1243 |
tcg_temp_free(tmp); |
1244 |
} |
1245 |
break;
|
1246 |
|
1247 |
default: /*reserved*/ |
1248 |
RESERVED(); |
1249 |
break;
|
1250 |
|
1251 |
} |
1252 |
break;
|
1253 |
|
1254 |
case 1: /*AND*/ |
1255 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1256 |
tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1257 |
break;
|
1258 |
|
1259 |
case 2: /*OR*/ |
1260 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1261 |
tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1262 |
break;
|
1263 |
|
1264 |
case 3: /*XOR*/ |
1265 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1266 |
tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1267 |
break;
|
1268 |
|
1269 |
case 4: /*ST1*/ |
1270 |
switch (RRR_R) {
|
1271 |
case 0: /*SSR*/ |
1272 |
gen_window_check1(dc, RRR_S); |
1273 |
gen_right_shift_sar(dc, cpu_R[RRR_S]); |
1274 |
break;
|
1275 |
|
1276 |
case 1: /*SSL*/ |
1277 |
gen_window_check1(dc, RRR_S); |
1278 |
gen_left_shift_sar(dc, cpu_R[RRR_S]); |
1279 |
break;
|
1280 |
|
1281 |
case 2: /*SSA8L*/ |
1282 |
gen_window_check1(dc, RRR_S); |
1283 |
{ |
1284 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1285 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
|
1286 |
gen_right_shift_sar(dc, tmp); |
1287 |
tcg_temp_free(tmp); |
1288 |
} |
1289 |
break;
|
1290 |
|
1291 |
case 3: /*SSA8B*/ |
1292 |
gen_window_check1(dc, RRR_S); |
1293 |
{ |
1294 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1295 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
|
1296 |
gen_left_shift_sar(dc, tmp); |
1297 |
tcg_temp_free(tmp); |
1298 |
} |
1299 |
break;
|
1300 |
|
1301 |
case 4: /*SSAI*/ |
1302 |
{ |
1303 |
TCGv_i32 tmp = tcg_const_i32( |
1304 |
RRR_S | ((RRR_T & 1) << 4)); |
1305 |
gen_right_shift_sar(dc, tmp); |
1306 |
tcg_temp_free(tmp); |
1307 |
} |
1308 |
break;
|
1309 |
|
1310 |
case 6: /*RER*/ |
1311 |
TBD(); |
1312 |
break;
|
1313 |
|
1314 |
case 7: /*WER*/ |
1315 |
TBD(); |
1316 |
break;
|
1317 |
|
1318 |
case 8: /*ROTWw*/ |
1319 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1320 |
gen_check_privilege(dc); |
1321 |
{ |
1322 |
TCGv_i32 tmp = tcg_const_i32( |
1323 |
RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0)); |
1324 |
gen_helper_rotw(cpu_env, tmp); |
1325 |
tcg_temp_free(tmp); |
1326 |
reset_used_window(dc); |
1327 |
} |
1328 |
break;
|
1329 |
|
1330 |
case 14: /*NSAu*/ |
1331 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); |
1332 |
gen_window_check2(dc, RRR_S, RRR_T); |
1333 |
gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]); |
1334 |
break;
|
1335 |
|
1336 |
case 15: /*NSAUu*/ |
1337 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); |
1338 |
gen_window_check2(dc, RRR_S, RRR_T); |
1339 |
gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]); |
1340 |
break;
|
1341 |
|
1342 |
default: /*reserved*/ |
1343 |
RESERVED(); |
1344 |
break;
|
1345 |
} |
1346 |
break;
|
1347 |
|
1348 |
case 5: /*TLB*/ |
1349 |
HAS_OPTION_BITS( |
1350 |
XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) | |
1351 |
XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | |
1352 |
XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION)); |
1353 |
gen_check_privilege(dc); |
1354 |
gen_window_check2(dc, RRR_S, RRR_T); |
1355 |
{ |
1356 |
TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0); |
1357 |
|
1358 |
switch (RRR_R & 7) { |
1359 |
case 3: /*RITLB0*/ /*RDTLB0*/ |
1360 |
gen_helper_rtlb0(cpu_R[RRR_T], |
1361 |
cpu_env, cpu_R[RRR_S], dtlb); |
1362 |
break;
|
1363 |
|
1364 |
case 4: /*IITLB*/ /*IDTLB*/ |
1365 |
gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb); |
1366 |
/* This could change memory mapping, so exit tb */
|
1367 |
gen_jumpi_check_loop_end(dc, -1);
|
1368 |
break;
|
1369 |
|
1370 |
case 5: /*PITLB*/ /*PDTLB*/ |
1371 |
tcg_gen_movi_i32(cpu_pc, dc->pc); |
1372 |
gen_helper_ptlb(cpu_R[RRR_T], |
1373 |
cpu_env, cpu_R[RRR_S], dtlb); |
1374 |
break;
|
1375 |
|
1376 |
case 6: /*WITLB*/ /*WDTLB*/ |
1377 |
gen_helper_wtlb( |
1378 |
cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb); |
1379 |
/* This could change memory mapping, so exit tb */
|
1380 |
gen_jumpi_check_loop_end(dc, -1);
|
1381 |
break;
|
1382 |
|
1383 |
case 7: /*RITLB1*/ /*RDTLB1*/ |
1384 |
gen_helper_rtlb1(cpu_R[RRR_T], |
1385 |
cpu_env, cpu_R[RRR_S], dtlb); |
1386 |
break;
|
1387 |
|
1388 |
default:
|
1389 |
tcg_temp_free(dtlb); |
1390 |
RESERVED(); |
1391 |
break;
|
1392 |
} |
1393 |
tcg_temp_free(dtlb); |
1394 |
} |
1395 |
break;
|
1396 |
|
1397 |
case 6: /*RT0*/ |
1398 |
gen_window_check2(dc, RRR_R, RRR_T); |
1399 |
switch (RRR_S) {
|
1400 |
case 0: /*NEG*/ |
1401 |
tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]); |
1402 |
break;
|
1403 |
|
1404 |
case 1: /*ABS*/ |
1405 |
{ |
1406 |
TCGv_i32 zero = tcg_const_i32(0);
|
1407 |
TCGv_i32 neg = tcg_temp_new_i32(); |
1408 |
|
1409 |
tcg_gen_neg_i32(neg, cpu_R[RRR_T]); |
1410 |
tcg_gen_movcond_i32(TCG_COND_GE, cpu_R[RRR_R], |
1411 |
cpu_R[RRR_T], zero, cpu_R[RRR_T], neg); |
1412 |
tcg_temp_free(neg); |
1413 |
tcg_temp_free(zero); |
1414 |
} |
1415 |
break;
|
1416 |
|
1417 |
default: /*reserved*/ |
1418 |
RESERVED(); |
1419 |
break;
|
1420 |
} |
1421 |
break;
|
1422 |
|
1423 |
case 7: /*reserved*/ |
1424 |
RESERVED(); |
1425 |
break;
|
1426 |
|
1427 |
case 8: /*ADD*/ |
1428 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1429 |
tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1430 |
break;
|
1431 |
|
1432 |
case 9: /*ADD**/ |
1433 |
case 10: |
1434 |
case 11: |
1435 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1436 |
{ |
1437 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1438 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
|
1439 |
tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); |
1440 |
tcg_temp_free(tmp); |
1441 |
} |
1442 |
break;
|
1443 |
|
1444 |
case 12: /*SUB*/ |
1445 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1446 |
tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1447 |
break;
|
1448 |
|
1449 |
case 13: /*SUB**/ |
1450 |
case 14: |
1451 |
case 15: |
1452 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1453 |
{ |
1454 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1455 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
|
1456 |
tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); |
1457 |
tcg_temp_free(tmp); |
1458 |
} |
1459 |
break;
|
1460 |
} |
1461 |
break;
|
1462 |
|
1463 |
case 1: /*RST1*/ |
1464 |
switch (OP2) {
|
1465 |
case 0: /*SLLI*/ |
1466 |
case 1: |
1467 |
gen_window_check2(dc, RRR_R, RRR_S); |
1468 |
tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S], |
1469 |
32 - (RRR_T | ((OP2 & 1) << 4))); |
1470 |
break;
|
1471 |
|
1472 |
case 2: /*SRAI*/ |
1473 |
case 3: |
1474 |
gen_window_check2(dc, RRR_R, RRR_T); |
1475 |
tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T], |
1476 |
RRR_S | ((OP2 & 1) << 4)); |
1477 |
break;
|
1478 |
|
1479 |
case 4: /*SRLI*/ |
1480 |
gen_window_check2(dc, RRR_R, RRR_T); |
1481 |
tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S); |
1482 |
break;
|
1483 |
|
1484 |
case 6: /*XSR*/ |
1485 |
{ |
1486 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1487 |
gen_check_sr(dc, RSR_SR, SR_X); |
1488 |
if (RSR_SR >= 64) { |
1489 |
gen_check_privilege(dc); |
1490 |
} |
1491 |
gen_window_check1(dc, RRR_T); |
1492 |
tcg_gen_mov_i32(tmp, cpu_R[RRR_T]); |
1493 |
gen_rsr(dc, cpu_R[RRR_T], RSR_SR); |
1494 |
gen_wsr(dc, RSR_SR, tmp); |
1495 |
tcg_temp_free(tmp); |
1496 |
} |
1497 |
break;
|
1498 |
|
1499 |
/*
|
1500 |
* Note: 64 bit ops are used here solely because SAR values
|
1501 |
* have range 0..63
|
1502 |
*/
|
1503 |
#define gen_shift_reg(cmd, reg) do { \ |
1504 |
TCGv_i64 tmp = tcg_temp_new_i64(); \ |
1505 |
tcg_gen_extu_i32_i64(tmp, reg); \ |
1506 |
tcg_gen_##cmd##_i64(v, v, tmp); \ |
1507 |
tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \ |
1508 |
tcg_temp_free_i64(v); \ |
1509 |
tcg_temp_free_i64(tmp); \ |
1510 |
} while (0) |
1511 |
|
1512 |
#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
|
1513 |
|
1514 |
case 8: /*SRC*/ |
1515 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1516 |
{ |
1517 |
TCGv_i64 v = tcg_temp_new_i64(); |
1518 |
tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]); |
1519 |
gen_shift(shr); |
1520 |
} |
1521 |
break;
|
1522 |
|
1523 |
case 9: /*SRL*/ |
1524 |
gen_window_check2(dc, RRR_R, RRR_T); |
1525 |
if (dc->sar_5bit) {
|
1526 |
tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); |
1527 |
} else {
|
1528 |
TCGv_i64 v = tcg_temp_new_i64(); |
1529 |
tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]); |
1530 |
gen_shift(shr); |
1531 |
} |
1532 |
break;
|
1533 |
|
1534 |
case 10: /*SLL*/ |
1535 |
gen_window_check2(dc, RRR_R, RRR_S); |
1536 |
if (dc->sar_m32_5bit) {
|
1537 |
tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32); |
1538 |
} else {
|
1539 |
TCGv_i64 v = tcg_temp_new_i64(); |
1540 |
TCGv_i32 s = tcg_const_i32(32);
|
1541 |
tcg_gen_sub_i32(s, s, cpu_SR[SAR]); |
1542 |
tcg_gen_andi_i32(s, s, 0x3f);
|
1543 |
tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]); |
1544 |
gen_shift_reg(shl, s); |
1545 |
tcg_temp_free(s); |
1546 |
} |
1547 |
break;
|
1548 |
|
1549 |
case 11: /*SRA*/ |
1550 |
gen_window_check2(dc, RRR_R, RRR_T); |
1551 |
if (dc->sar_5bit) {
|
1552 |
tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); |
1553 |
} else {
|
1554 |
TCGv_i64 v = tcg_temp_new_i64(); |
1555 |
tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]); |
1556 |
gen_shift(sar); |
1557 |
} |
1558 |
break;
|
1559 |
#undef gen_shift
|
1560 |
#undef gen_shift_reg
|
1561 |
|
1562 |
case 12: /*MUL16U*/ |
1563 |
HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); |
1564 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1565 |
{ |
1566 |
TCGv_i32 v1 = tcg_temp_new_i32(); |
1567 |
TCGv_i32 v2 = tcg_temp_new_i32(); |
1568 |
tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]); |
1569 |
tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]); |
1570 |
tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); |
1571 |
tcg_temp_free(v2); |
1572 |
tcg_temp_free(v1); |
1573 |
} |
1574 |
break;
|
1575 |
|
1576 |
case 13: /*MUL16S*/ |
1577 |
HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); |
1578 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1579 |
{ |
1580 |
TCGv_i32 v1 = tcg_temp_new_i32(); |
1581 |
TCGv_i32 v2 = tcg_temp_new_i32(); |
1582 |
tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]); |
1583 |
tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]); |
1584 |
tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); |
1585 |
tcg_temp_free(v2); |
1586 |
tcg_temp_free(v1); |
1587 |
} |
1588 |
break;
|
1589 |
|
1590 |
default: /*reserved*/ |
1591 |
RESERVED(); |
1592 |
break;
|
1593 |
} |
1594 |
break;
|
1595 |
|
1596 |
case 2: /*RST2*/ |
1597 |
if (OP2 >= 8) { |
1598 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1599 |
} |
1600 |
|
1601 |
if (OP2 >= 12) { |
1602 |
HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV); |
1603 |
int label = gen_new_label();
|
1604 |
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
|
1605 |
gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE); |
1606 |
gen_set_label(label); |
1607 |
} |
1608 |
|
1609 |
switch (OP2) {
|
1610 |
#define BOOLEAN_LOGIC(fn, r, s, t) \
|
1611 |
do { \
|
1612 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); \ |
1613 |
TCGv_i32 tmp1 = tcg_temp_new_i32(); \ |
1614 |
TCGv_i32 tmp2 = tcg_temp_new_i32(); \ |
1615 |
\ |
1616 |
tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \ |
1617 |
tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \ |
1618 |
tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \ |
1619 |
tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
|
1620 |
tcg_temp_free(tmp1); \ |
1621 |
tcg_temp_free(tmp2); \ |
1622 |
} while (0) |
1623 |
|
1624 |
case 0: /*ANDBp*/ |
1625 |
BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T); |
1626 |
break;
|
1627 |
|
1628 |
case 1: /*ANDBCp*/ |
1629 |
BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T); |
1630 |
break;
|
1631 |
|
1632 |
case 2: /*ORBp*/ |
1633 |
BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T); |
1634 |
break;
|
1635 |
|
1636 |
case 3: /*ORBCp*/ |
1637 |
BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T); |
1638 |
break;
|
1639 |
|
1640 |
case 4: /*XORBp*/ |
1641 |
BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T); |
1642 |
break;
|
1643 |
|
1644 |
#undef BOOLEAN_LOGIC
|
1645 |
|
1646 |
case 8: /*MULLi*/ |
1647 |
HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL); |
1648 |
tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1649 |
break;
|
1650 |
|
1651 |
case 10: /*MULUHi*/ |
1652 |
case 11: /*MULSHi*/ |
1653 |
HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH); |
1654 |
{ |
1655 |
TCGv_i64 r = tcg_temp_new_i64(); |
1656 |
TCGv_i64 s = tcg_temp_new_i64(); |
1657 |
TCGv_i64 t = tcg_temp_new_i64(); |
1658 |
|
1659 |
if (OP2 == 10) { |
1660 |
tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]); |
1661 |
tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]); |
1662 |
} else {
|
1663 |
tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]); |
1664 |
tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]); |
1665 |
} |
1666 |
tcg_gen_mul_i64(r, s, t); |
1667 |
tcg_gen_shri_i64(r, r, 32);
|
1668 |
tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r); |
1669 |
|
1670 |
tcg_temp_free_i64(r); |
1671 |
tcg_temp_free_i64(s); |
1672 |
tcg_temp_free_i64(t); |
1673 |
} |
1674 |
break;
|
1675 |
|
1676 |
case 12: /*QUOUi*/ |
1677 |
tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1678 |
break;
|
1679 |
|
1680 |
case 13: /*QUOSi*/ |
1681 |
case 15: /*REMSi*/ |
1682 |
{ |
1683 |
int label1 = gen_new_label();
|
1684 |
int label2 = gen_new_label();
|
1685 |
|
1686 |
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
|
1687 |
label1); |
1688 |
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
|
1689 |
label1); |
1690 |
tcg_gen_movi_i32(cpu_R[RRR_R], |
1691 |
OP2 == 13 ? 0x80000000 : 0); |
1692 |
tcg_gen_br(label2); |
1693 |
gen_set_label(label1); |
1694 |
if (OP2 == 13) { |
1695 |
tcg_gen_div_i32(cpu_R[RRR_R], |
1696 |
cpu_R[RRR_S], cpu_R[RRR_T]); |
1697 |
} else {
|
1698 |
tcg_gen_rem_i32(cpu_R[RRR_R], |
1699 |
cpu_R[RRR_S], cpu_R[RRR_T]); |
1700 |
} |
1701 |
gen_set_label(label2); |
1702 |
} |
1703 |
break;
|
1704 |
|
1705 |
case 14: /*REMUi*/ |
1706 |
tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1707 |
break;
|
1708 |
|
1709 |
default: /*reserved*/ |
1710 |
RESERVED(); |
1711 |
break;
|
1712 |
} |
1713 |
break;
|
1714 |
|
1715 |
case 3: /*RST3*/ |
1716 |
switch (OP2) {
|
1717 |
case 0: /*RSR*/ |
1718 |
gen_check_sr(dc, RSR_SR, SR_R); |
1719 |
if (RSR_SR >= 64) { |
1720 |
gen_check_privilege(dc); |
1721 |
} |
1722 |
gen_window_check1(dc, RRR_T); |
1723 |
gen_rsr(dc, cpu_R[RRR_T], RSR_SR); |
1724 |
break;
|
1725 |
|
1726 |
case 1: /*WSR*/ |
1727 |
gen_check_sr(dc, RSR_SR, SR_W); |
1728 |
if (RSR_SR >= 64) { |
1729 |
gen_check_privilege(dc); |
1730 |
} |
1731 |
gen_window_check1(dc, RRR_T); |
1732 |
gen_wsr(dc, RSR_SR, cpu_R[RRR_T]); |
1733 |
break;
|
1734 |
|
1735 |
case 2: /*SEXTu*/ |
1736 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT); |
1737 |
gen_window_check2(dc, RRR_R, RRR_S); |
1738 |
{ |
1739 |
int shift = 24 - RRR_T; |
1740 |
|
1741 |
if (shift == 24) { |
1742 |
tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1743 |
} else if (shift == 16) { |
1744 |
tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1745 |
} else {
|
1746 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1747 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift); |
1748 |
tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift); |
1749 |
tcg_temp_free(tmp); |
1750 |
} |
1751 |
} |
1752 |
break;
|
1753 |
|
1754 |
case 3: /*CLAMPSu*/ |
1755 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS); |
1756 |
gen_window_check2(dc, RRR_R, RRR_S); |
1757 |
{ |
1758 |
TCGv_i32 tmp1 = tcg_temp_new_i32(); |
1759 |
TCGv_i32 tmp2 = tcg_temp_new_i32(); |
1760 |
TCGv_i32 zero = tcg_const_i32(0);
|
1761 |
|
1762 |
tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
|
1763 |
tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]); |
1764 |
tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7)); |
1765 |
|
1766 |
tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
|
1767 |
tcg_gen_xori_i32(tmp1, tmp1, 0xffffffff >> (25 - RRR_T)); |
1768 |
|
1769 |
tcg_gen_movcond_i32(TCG_COND_EQ, cpu_R[RRR_R], tmp2, zero, |
1770 |
cpu_R[RRR_S], tmp1); |
1771 |
tcg_temp_free(tmp1); |
1772 |
tcg_temp_free(tmp2); |
1773 |
tcg_temp_free(zero); |
1774 |
} |
1775 |
break;
|
1776 |
|
1777 |
case 4: /*MINu*/ |
1778 |
case 5: /*MAXu*/ |
1779 |
case 6: /*MINUu*/ |
1780 |
case 7: /*MAXUu*/ |
1781 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX); |
1782 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1783 |
{ |
1784 |
static const TCGCond cond[] = { |
1785 |
TCG_COND_LE, |
1786 |
TCG_COND_GE, |
1787 |
TCG_COND_LEU, |
1788 |
TCG_COND_GEU |
1789 |
}; |
1790 |
tcg_gen_movcond_i32(cond[OP2 - 4], cpu_R[RRR_R],
|
1791 |
cpu_R[RRR_S], cpu_R[RRR_T], |
1792 |
cpu_R[RRR_S], cpu_R[RRR_T]); |
1793 |
} |
1794 |
break;
|
1795 |
|
1796 |
case 8: /*MOVEQZ*/ |
1797 |
case 9: /*MOVNEZ*/ |
1798 |
case 10: /*MOVLTZ*/ |
1799 |
case 11: /*MOVGEZ*/ |
1800 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1801 |
{ |
1802 |
static const TCGCond cond[] = { |
1803 |
TCG_COND_EQ, |
1804 |
TCG_COND_NE, |
1805 |
TCG_COND_LT, |
1806 |
TCG_COND_GE, |
1807 |
}; |
1808 |
TCGv_i32 zero = tcg_const_i32(0);
|
1809 |
|
1810 |
tcg_gen_movcond_i32(cond[OP2 - 8], cpu_R[RRR_R],
|
1811 |
cpu_R[RRR_T], zero, cpu_R[RRR_S], cpu_R[RRR_R]); |
1812 |
tcg_temp_free(zero); |
1813 |
} |
1814 |
break;
|
1815 |
|
1816 |
case 12: /*MOVFp*/ |
1817 |
case 13: /*MOVTp*/ |
1818 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
1819 |
gen_window_check2(dc, RRR_R, RRR_S); |
1820 |
{ |
1821 |
TCGv_i32 zero = tcg_const_i32(0);
|
1822 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1823 |
|
1824 |
tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
|
1825 |
tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
|
1826 |
cpu_R[RRR_R], tmp, zero, |
1827 |
cpu_R[RRR_S], cpu_R[RRR_R]); |
1828 |
|
1829 |
tcg_temp_free(tmp); |
1830 |
tcg_temp_free(zero); |
1831 |
} |
1832 |
break;
|
1833 |
|
1834 |
case 14: /*RUR*/ |
1835 |
gen_window_check1(dc, RRR_R); |
1836 |
{ |
1837 |
int st = (RRR_S << 4) + RRR_T; |
1838 |
if (uregnames[st].name) {
|
1839 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]); |
1840 |
} else {
|
1841 |
qemu_log("RUR %d not implemented, ", st);
|
1842 |
TBD(); |
1843 |
} |
1844 |
} |
1845 |
break;
|
1846 |
|
1847 |
case 15: /*WUR*/ |
1848 |
gen_window_check1(dc, RRR_T); |
1849 |
if (uregnames[RSR_SR].name) {
|
1850 |
gen_wur(RSR_SR, cpu_R[RRR_T]); |
1851 |
} else {
|
1852 |
qemu_log("WUR %d not implemented, ", RSR_SR);
|
1853 |
TBD(); |
1854 |
} |
1855 |
break;
|
1856 |
|
1857 |
} |
1858 |
break;
|
1859 |
|
1860 |
case 4: /*EXTUI*/ |
1861 |
case 5: |
1862 |
gen_window_check2(dc, RRR_R, RRR_T); |
1863 |
{ |
1864 |
int shiftimm = RRR_S | ((OP1 & 1) << 4); |
1865 |
int maskimm = (1 << (OP2 + 1)) - 1; |
1866 |
|
1867 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1868 |
tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm); |
1869 |
tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm); |
1870 |
tcg_temp_free(tmp); |
1871 |
} |
1872 |
break;
|
1873 |
|
1874 |
case 6: /*CUST0*/ |
1875 |
RESERVED(); |
1876 |
break;
|
1877 |
|
1878 |
case 7: /*CUST1*/ |
1879 |
RESERVED(); |
1880 |
break;
|
1881 |
|
1882 |
case 8: /*LSCXp*/ |
1883 |
switch (OP2) {
|
1884 |
case 0: /*LSXf*/ |
1885 |
case 1: /*LSXUf*/ |
1886 |
case 4: /*SSXf*/ |
1887 |
case 5: /*SSXUf*/ |
1888 |
HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); |
1889 |
gen_window_check2(dc, RRR_S, RRR_T); |
1890 |
gen_check_cpenable(dc, 0);
|
1891 |
{ |
1892 |
TCGv_i32 addr = tcg_temp_new_i32(); |
1893 |
tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]); |
1894 |
gen_load_store_alignment(dc, 2, addr, false); |
1895 |
if (OP2 & 0x4) { |
1896 |
tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring); |
1897 |
} else {
|
1898 |
tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring); |
1899 |
} |
1900 |
if (OP2 & 0x1) { |
1901 |
tcg_gen_mov_i32(cpu_R[RRR_S], addr); |
1902 |
} |
1903 |
tcg_temp_free(addr); |
1904 |
} |
1905 |
break;
|
1906 |
|
1907 |
default: /*reserved*/ |
1908 |
RESERVED(); |
1909 |
break;
|
1910 |
} |
1911 |
break;
|
1912 |
|
1913 |
case 9: /*LSC4*/ |
1914 |
gen_window_check2(dc, RRR_S, RRR_T); |
1915 |
switch (OP2) {
|
1916 |
case 0: /*L32E*/ |
1917 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1918 |
gen_check_privilege(dc); |
1919 |
{ |
1920 |
TCGv_i32 addr = tcg_temp_new_i32(); |
1921 |
tcg_gen_addi_i32(addr, cpu_R[RRR_S], |
1922 |
(0xffffffc0 | (RRR_R << 2))); |
1923 |
tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring); |
1924 |
tcg_temp_free(addr); |
1925 |
} |
1926 |
break;
|
1927 |
|
1928 |
case 4: /*S32E*/ |
1929 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1930 |
gen_check_privilege(dc); |
1931 |
{ |
1932 |
TCGv_i32 addr = tcg_temp_new_i32(); |
1933 |
tcg_gen_addi_i32(addr, cpu_R[RRR_S], |
1934 |
(0xffffffc0 | (RRR_R << 2))); |
1935 |
tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring); |
1936 |
tcg_temp_free(addr); |
1937 |
} |
1938 |
break;
|
1939 |
|
1940 |
default:
|
1941 |
RESERVED(); |
1942 |
break;
|
1943 |
} |
1944 |
break;
|
1945 |
|
1946 |
case 10: /*FP0*/ |
1947 |
HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); |
1948 |
switch (OP2) {
|
1949 |
case 0: /*ADD.Sf*/ |
1950 |
gen_check_cpenable(dc, 0);
|
1951 |
gen_helper_add_s(cpu_FR[RRR_R], cpu_env, |
1952 |
cpu_FR[RRR_S], cpu_FR[RRR_T]); |
1953 |
break;
|
1954 |
|
1955 |
case 1: /*SUB.Sf*/ |
1956 |
gen_check_cpenable(dc, 0);
|
1957 |
gen_helper_sub_s(cpu_FR[RRR_R], cpu_env, |
1958 |
cpu_FR[RRR_S], cpu_FR[RRR_T]); |
1959 |
break;
|
1960 |
|
1961 |
case 2: /*MUL.Sf*/ |
1962 |
gen_check_cpenable(dc, 0);
|
1963 |
gen_helper_mul_s(cpu_FR[RRR_R], cpu_env, |
1964 |
cpu_FR[RRR_S], cpu_FR[RRR_T]); |
1965 |
break;
|
1966 |
|
1967 |
case 4: /*MADD.Sf*/ |
1968 |
gen_check_cpenable(dc, 0);
|
1969 |
gen_helper_madd_s(cpu_FR[RRR_R], cpu_env, |
1970 |
cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]); |
1971 |
break;
|
1972 |
|
1973 |
case 5: /*MSUB.Sf*/ |
1974 |
gen_check_cpenable(dc, 0);
|
1975 |
gen_helper_msub_s(cpu_FR[RRR_R], cpu_env, |
1976 |
cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]); |
1977 |
break;
|
1978 |
|
1979 |
case 8: /*ROUND.Sf*/ |
1980 |
case 9: /*TRUNC.Sf*/ |
1981 |
case 10: /*FLOOR.Sf*/ |
1982 |
case 11: /*CEIL.Sf*/ |
1983 |
case 14: /*UTRUNC.Sf*/ |
1984 |
gen_window_check1(dc, RRR_R); |
1985 |
gen_check_cpenable(dc, 0);
|
1986 |
{ |
1987 |
static const unsigned rounding_mode_const[] = { |
1988 |
float_round_nearest_even, |
1989 |
float_round_to_zero, |
1990 |
float_round_down, |
1991 |
float_round_up, |
1992 |
[6] = float_round_to_zero,
|
1993 |
}; |
1994 |
TCGv_i32 rounding_mode = tcg_const_i32( |
1995 |
rounding_mode_const[OP2 & 7]);
|
1996 |
TCGv_i32 scale = tcg_const_i32(RRR_T); |
1997 |
|
1998 |
if (OP2 == 14) { |
1999 |
gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S], |
2000 |
rounding_mode, scale); |
2001 |
} else {
|
2002 |
gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S], |
2003 |
rounding_mode, scale); |
2004 |
} |
2005 |
|
2006 |
tcg_temp_free(rounding_mode); |
2007 |
tcg_temp_free(scale); |
2008 |
} |
2009 |
break;
|
2010 |
|
2011 |
case 12: /*FLOAT.Sf*/ |
2012 |
case 13: /*UFLOAT.Sf*/ |
2013 |
gen_window_check1(dc, RRR_S); |
2014 |
gen_check_cpenable(dc, 0);
|
2015 |
{ |
2016 |
TCGv_i32 scale = tcg_const_i32(-RRR_T); |
2017 |
|
2018 |
if (OP2 == 13) { |
2019 |
gen_helper_uitof(cpu_FR[RRR_R], cpu_env, |
2020 |
cpu_R[RRR_S], scale); |
2021 |
} else {
|
2022 |
gen_helper_itof(cpu_FR[RRR_R], cpu_env, |
2023 |
cpu_R[RRR_S], scale); |
2024 |
} |
2025 |
tcg_temp_free(scale); |
2026 |
} |
2027 |
break;
|
2028 |
|
2029 |
case 15: /*FP1OP*/ |
2030 |
switch (RRR_T) {
|
2031 |
case 0: /*MOV.Sf*/ |
2032 |
gen_check_cpenable(dc, 0);
|
2033 |
tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]); |
2034 |
break;
|
2035 |
|
2036 |
case 1: /*ABS.Sf*/ |
2037 |
gen_check_cpenable(dc, 0);
|
2038 |
gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]); |
2039 |
break;
|
2040 |
|
2041 |
case 4: /*RFRf*/ |
2042 |
gen_window_check1(dc, RRR_R); |
2043 |
gen_check_cpenable(dc, 0);
|
2044 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]); |
2045 |
break;
|
2046 |
|
2047 |
case 5: /*WFRf*/ |
2048 |
gen_window_check1(dc, RRR_S); |
2049 |
gen_check_cpenable(dc, 0);
|
2050 |
tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]); |
2051 |
break;
|
2052 |
|
2053 |
case 6: /*NEG.Sf*/ |
2054 |
gen_check_cpenable(dc, 0);
|
2055 |
gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]); |
2056 |
break;
|
2057 |
|
2058 |
default: /*reserved*/ |
2059 |
RESERVED(); |
2060 |
break;
|
2061 |
} |
2062 |
break;
|
2063 |
|
2064 |
default: /*reserved*/ |
2065 |
RESERVED(); |
2066 |
break;
|
2067 |
} |
2068 |
break;
|
2069 |
|
2070 |
case 11: /*FP1*/ |
2071 |
HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); |
2072 |
|
2073 |
#define gen_compare(rel, br, a, b) \
|
2074 |
do { \
|
2075 |
TCGv_i32 bit = tcg_const_i32(1 << br); \
|
2076 |
\ |
2077 |
gen_check_cpenable(dc, 0); \
|
2078 |
gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \ |
2079 |
tcg_temp_free(bit); \ |
2080 |
} while (0) |
2081 |
|
2082 |
switch (OP2) {
|
2083 |
case 1: /*UN.Sf*/ |
2084 |
gen_compare(un_s, RRR_R, RRR_S, RRR_T); |
2085 |
break;
|
2086 |
|
2087 |
case 2: /*OEQ.Sf*/ |
2088 |
gen_compare(oeq_s, RRR_R, RRR_S, RRR_T); |
2089 |
break;
|
2090 |
|
2091 |
case 3: /*UEQ.Sf*/ |
2092 |
gen_compare(ueq_s, RRR_R, RRR_S, RRR_T); |
2093 |
break;
|
2094 |
|
2095 |
case 4: /*OLT.Sf*/ |
2096 |
gen_compare(olt_s, RRR_R, RRR_S, RRR_T); |
2097 |
break;
|
2098 |
|
2099 |
case 5: /*ULT.Sf*/ |
2100 |
gen_compare(ult_s, RRR_R, RRR_S, RRR_T); |
2101 |
break;
|
2102 |
|
2103 |
case 6: /*OLE.Sf*/ |
2104 |
gen_compare(ole_s, RRR_R, RRR_S, RRR_T); |
2105 |
break;
|
2106 |
|
2107 |
case 7: /*ULE.Sf*/ |
2108 |
gen_compare(ule_s, RRR_R, RRR_S, RRR_T); |
2109 |
break;
|
2110 |
|
2111 |
#undef gen_compare
|
2112 |
|
2113 |
case 8: /*MOVEQZ.Sf*/ |
2114 |
case 9: /*MOVNEZ.Sf*/ |
2115 |
case 10: /*MOVLTZ.Sf*/ |
2116 |
case 11: /*MOVGEZ.Sf*/ |
2117 |
gen_window_check1(dc, RRR_T); |
2118 |
gen_check_cpenable(dc, 0);
|
2119 |
{ |
2120 |
static const TCGCond cond[] = { |
2121 |
TCG_COND_EQ, |
2122 |
TCG_COND_NE, |
2123 |
TCG_COND_LT, |
2124 |
TCG_COND_GE, |
2125 |
}; |
2126 |
TCGv_i32 zero = tcg_const_i32(0);
|
2127 |
|
2128 |
tcg_gen_movcond_i32(cond[OP2 - 8], cpu_FR[RRR_R],
|
2129 |
cpu_R[RRR_T], zero, cpu_FR[RRR_S], cpu_FR[RRR_R]); |
2130 |
tcg_temp_free(zero); |
2131 |
} |
2132 |
break;
|
2133 |
|
2134 |
case 12: /*MOVF.Sf*/ |
2135 |
case 13: /*MOVT.Sf*/ |
2136 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
2137 |
gen_check_cpenable(dc, 0);
|
2138 |
{ |
2139 |
TCGv_i32 zero = tcg_const_i32(0);
|
2140 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2141 |
|
2142 |
tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
|
2143 |
tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
|
2144 |
cpu_FR[RRR_R], tmp, zero, |
2145 |
cpu_FR[RRR_S], cpu_FR[RRR_R]); |
2146 |
|
2147 |
tcg_temp_free(tmp); |
2148 |
tcg_temp_free(zero); |
2149 |
} |
2150 |
break;
|
2151 |
|
2152 |
default: /*reserved*/ |
2153 |
RESERVED(); |
2154 |
break;
|
2155 |
} |
2156 |
break;
|
2157 |
|
2158 |
default: /*reserved*/ |
2159 |
RESERVED(); |
2160 |
break;
|
2161 |
} |
2162 |
break;
|
2163 |
|
2164 |
case 1: /*L32R*/ |
2165 |
gen_window_check1(dc, RRR_T); |
2166 |
{ |
2167 |
TCGv_i32 tmp = tcg_const_i32( |
2168 |
((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ? |
2169 |
0 : ((dc->pc + 3) & ~3)) + |
2170 |
(0xfffc0000 | (RI16_IMM16 << 2))); |
2171 |
|
2172 |
if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
|
2173 |
tcg_gen_add_i32(tmp, tmp, dc->litbase); |
2174 |
} |
2175 |
tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring); |
2176 |
tcg_temp_free(tmp); |
2177 |
} |
2178 |
break;
|
2179 |
|
2180 |
case 2: /*LSAI*/ |
2181 |
#define gen_load_store(type, shift) do { \ |
2182 |
TCGv_i32 addr = tcg_temp_new_i32(); \ |
2183 |
gen_window_check2(dc, RRI8_S, RRI8_T); \ |
2184 |
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \ |
2185 |
if (shift) { \
|
2186 |
gen_load_store_alignment(dc, shift, addr, false); \
|
2187 |
} \ |
2188 |
tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ |
2189 |
tcg_temp_free(addr); \ |
2190 |
} while (0) |
2191 |
|
2192 |
switch (RRI8_R) {
|
2193 |
case 0: /*L8UI*/ |
2194 |
gen_load_store(ld8u, 0);
|
2195 |
break;
|
2196 |
|
2197 |
case 1: /*L16UI*/ |
2198 |
gen_load_store(ld16u, 1);
|
2199 |
break;
|
2200 |
|
2201 |
case 2: /*L32I*/ |
2202 |
gen_load_store(ld32u, 2);
|
2203 |
break;
|
2204 |
|
2205 |
case 4: /*S8I*/ |
2206 |
gen_load_store(st8, 0);
|
2207 |
break;
|
2208 |
|
2209 |
case 5: /*S16I*/ |
2210 |
gen_load_store(st16, 1);
|
2211 |
break;
|
2212 |
|
2213 |
case 6: /*S32I*/ |
2214 |
gen_load_store(st32, 2);
|
2215 |
break;
|
2216 |
|
2217 |
case 7: /*CACHEc*/ |
2218 |
if (RRI8_T < 8) { |
2219 |
HAS_OPTION(XTENSA_OPTION_DCACHE); |
2220 |
} |
2221 |
|
2222 |
switch (RRI8_T) {
|
2223 |
case 0: /*DPFRc*/ |
2224 |
break;
|
2225 |
|
2226 |
case 1: /*DPFWc*/ |
2227 |
break;
|
2228 |
|
2229 |
case 2: /*DPFROc*/ |
2230 |
break;
|
2231 |
|
2232 |
case 3: /*DPFWOc*/ |
2233 |
break;
|
2234 |
|
2235 |
case 4: /*DHWBc*/ |
2236 |
break;
|
2237 |
|
2238 |
case 5: /*DHWBIc*/ |
2239 |
break;
|
2240 |
|
2241 |
case 6: /*DHIc*/ |
2242 |
break;
|
2243 |
|
2244 |
case 7: /*DIIc*/ |
2245 |
break;
|
2246 |
|
2247 |
case 8: /*DCEc*/ |
2248 |
switch (OP1) {
|
2249 |
case 0: /*DPFLl*/ |
2250 |
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); |
2251 |
break;
|
2252 |
|
2253 |
case 2: /*DHUl*/ |
2254 |
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); |
2255 |
break;
|
2256 |
|
2257 |
case 3: /*DIUl*/ |
2258 |
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); |
2259 |
break;
|
2260 |
|
2261 |
case 4: /*DIWBc*/ |
2262 |
HAS_OPTION(XTENSA_OPTION_DCACHE); |
2263 |
break;
|
2264 |
|
2265 |
case 5: /*DIWBIc*/ |
2266 |
HAS_OPTION(XTENSA_OPTION_DCACHE); |
2267 |
break;
|
2268 |
|
2269 |
default: /*reserved*/ |
2270 |
RESERVED(); |
2271 |
break;
|
2272 |
|
2273 |
} |
2274 |
break;
|
2275 |
|
2276 |
case 12: /*IPFc*/ |
2277 |
HAS_OPTION(XTENSA_OPTION_ICACHE); |
2278 |
break;
|
2279 |
|
2280 |
case 13: /*ICEc*/ |
2281 |
switch (OP1) {
|
2282 |
case 0: /*IPFLl*/ |
2283 |
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); |
2284 |
break;
|
2285 |
|
2286 |
case 2: /*IHUl*/ |
2287 |
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); |
2288 |
break;
|
2289 |
|
2290 |
case 3: /*IIUl*/ |
2291 |
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); |
2292 |
break;
|
2293 |
|
2294 |
default: /*reserved*/ |
2295 |
RESERVED(); |
2296 |
break;
|
2297 |
} |
2298 |
break;
|
2299 |
|
2300 |
case 14: /*IHIc*/ |
2301 |
HAS_OPTION(XTENSA_OPTION_ICACHE); |
2302 |
break;
|
2303 |
|
2304 |
case 15: /*IIIc*/ |
2305 |
HAS_OPTION(XTENSA_OPTION_ICACHE); |
2306 |
break;
|
2307 |
|
2308 |
default: /*reserved*/ |
2309 |
RESERVED(); |
2310 |
break;
|
2311 |
} |
2312 |
break;
|
2313 |
|
2314 |
case 9: /*L16SI*/ |
2315 |
gen_load_store(ld16s, 1);
|
2316 |
break;
|
2317 |
#undef gen_load_store
|
2318 |
|
2319 |
case 10: /*MOVI*/ |
2320 |
gen_window_check1(dc, RRI8_T); |
2321 |
tcg_gen_movi_i32(cpu_R[RRI8_T], |
2322 |
RRI8_IMM8 | (RRI8_S << 8) |
|
2323 |
((RRI8_S & 0x8) ? 0xfffff000 : 0)); |
2324 |
break;
|
2325 |
|
2326 |
#define gen_load_store_no_hw_align(type) do { \ |
2327 |
TCGv_i32 addr = tcg_temp_local_new_i32(); \ |
2328 |
gen_window_check2(dc, RRI8_S, RRI8_T); \ |
2329 |
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
|
2330 |
gen_load_store_alignment(dc, 2, addr, true); \ |
2331 |
tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ |
2332 |
tcg_temp_free(addr); \ |
2333 |
} while (0) |
2334 |
|
2335 |
case 11: /*L32AIy*/ |
2336 |
HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); |
2337 |
gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
|
2338 |
break;
|
2339 |
|
2340 |
case 12: /*ADDI*/ |
2341 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2342 |
tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE); |
2343 |
break;
|
2344 |
|
2345 |
case 13: /*ADDMI*/ |
2346 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2347 |
tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
|
2348 |
break;
|
2349 |
|
2350 |
case 14: /*S32C1Iy*/ |
2351 |
HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE); |
2352 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2353 |
{ |
2354 |
int label = gen_new_label();
|
2355 |
TCGv_i32 tmp = tcg_temp_local_new_i32(); |
2356 |
TCGv_i32 addr = tcg_temp_local_new_i32(); |
2357 |
TCGv_i32 tpc; |
2358 |
|
2359 |
tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]); |
2360 |
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
|
2361 |
gen_load_store_alignment(dc, 2, addr, true); |
2362 |
|
2363 |
gen_advance_ccount(dc); |
2364 |
tpc = tcg_const_i32(dc->pc); |
2365 |
gen_helper_check_atomctl(cpu_env, tpc, addr); |
2366 |
tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring); |
2367 |
tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T], |
2368 |
cpu_SR[SCOMPARE1], label); |
2369 |
|
2370 |
tcg_gen_qemu_st32(tmp, addr, dc->cring); |
2371 |
|
2372 |
gen_set_label(label); |
2373 |
tcg_temp_free(tpc); |
2374 |
tcg_temp_free(addr); |
2375 |
tcg_temp_free(tmp); |
2376 |
} |
2377 |
break;
|
2378 |
|
2379 |
case 15: /*S32RIy*/ |
2380 |
HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); |
2381 |
gen_load_store_no_hw_align(st32); /*TODO release?*/
|
2382 |
break;
|
2383 |
#undef gen_load_store_no_hw_align
|
2384 |
|
2385 |
default: /*reserved*/ |
2386 |
RESERVED(); |
2387 |
break;
|
2388 |
} |
2389 |
break;
|
2390 |
|
2391 |
case 3: /*LSCIp*/ |
2392 |
switch (RRI8_R) {
|
2393 |
case 0: /*LSIf*/ |
2394 |
case 4: /*SSIf*/ |
2395 |
case 8: /*LSIUf*/ |
2396 |
case 12: /*SSIUf*/ |
2397 |
HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); |
2398 |
gen_window_check1(dc, RRI8_S); |
2399 |
gen_check_cpenable(dc, 0);
|
2400 |
{ |
2401 |
TCGv_i32 addr = tcg_temp_new_i32(); |
2402 |
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
|
2403 |
gen_load_store_alignment(dc, 2, addr, false); |
2404 |
if (RRI8_R & 0x4) { |
2405 |
tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring); |
2406 |
} else {
|
2407 |
tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring); |
2408 |
} |
2409 |
if (RRI8_R & 0x8) { |
2410 |
tcg_gen_mov_i32(cpu_R[RRI8_S], addr); |
2411 |
} |
2412 |
tcg_temp_free(addr); |
2413 |
} |
2414 |
break;
|
2415 |
|
2416 |
default: /*reserved*/ |
2417 |
RESERVED(); |
2418 |
break;
|
2419 |
} |
2420 |
break;
|
2421 |
|
2422 |
case 4: /*MAC16d*/ |
2423 |
HAS_OPTION(XTENSA_OPTION_MAC16); |
2424 |
{ |
2425 |
enum {
|
2426 |
MAC16_UMUL = 0x0,
|
2427 |
MAC16_MUL = 0x4,
|
2428 |
MAC16_MULA = 0x8,
|
2429 |
MAC16_MULS = 0xc,
|
2430 |
MAC16_NONE = 0xf,
|
2431 |
} op = OP1 & 0xc;
|
2432 |
bool is_m1_sr = (OP2 & 0x3) == 2; |
2433 |
bool is_m2_sr = (OP2 & 0xc) == 0; |
2434 |
uint32_t ld_offset = 0;
|
2435 |
|
2436 |
if (OP2 > 9) { |
2437 |
RESERVED(); |
2438 |
} |
2439 |
|
2440 |
switch (OP2 & 2) { |
2441 |
case 0: /*MACI?/MACC?*/ |
2442 |
is_m1_sr = true;
|
2443 |
ld_offset = (OP2 & 1) ? -4 : 4; |
2444 |
|
2445 |
if (OP2 >= 8) { /*MACI/MACC*/ |
2446 |
if (OP1 == 0) { /*LDINC/LDDEC*/ |
2447 |
op = MAC16_NONE; |
2448 |
} else {
|
2449 |
RESERVED(); |
2450 |
} |
2451 |
} else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/ |
2452 |
RESERVED(); |
2453 |
} |
2454 |
break;
|
2455 |
|
2456 |
case 2: /*MACD?/MACA?*/ |
2457 |
if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/ |
2458 |
RESERVED(); |
2459 |
} |
2460 |
break;
|
2461 |
} |
2462 |
|
2463 |
if (op != MAC16_NONE) {
|
2464 |
if (!is_m1_sr) {
|
2465 |
gen_window_check1(dc, RRR_S); |
2466 |
} |
2467 |
if (!is_m2_sr) {
|
2468 |
gen_window_check1(dc, RRR_T); |
2469 |
} |
2470 |
} |
2471 |
|
2472 |
{ |
2473 |
TCGv_i32 vaddr = tcg_temp_new_i32(); |
2474 |
TCGv_i32 mem32 = tcg_temp_new_i32(); |
2475 |
|
2476 |
if (ld_offset) {
|
2477 |
gen_window_check1(dc, RRR_S); |
2478 |
tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset); |
2479 |
gen_load_store_alignment(dc, 2, vaddr, false); |
2480 |
tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring); |
2481 |
} |
2482 |
if (op != MAC16_NONE) {
|
2483 |
TCGv_i32 m1 = gen_mac16_m( |
2484 |
is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S], |
2485 |
OP1 & 1, op == MAC16_UMUL);
|
2486 |
TCGv_i32 m2 = gen_mac16_m( |
2487 |
is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
|
2488 |
OP1 & 2, op == MAC16_UMUL);
|
2489 |
|
2490 |
if (op == MAC16_MUL || op == MAC16_UMUL) {
|
2491 |
tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2); |
2492 |
if (op == MAC16_UMUL) {
|
2493 |
tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
|
2494 |
} else {
|
2495 |
tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
|
2496 |
} |
2497 |
} else {
|
2498 |
TCGv_i32 res = tcg_temp_new_i32(); |
2499 |
TCGv_i64 res64 = tcg_temp_new_i64(); |
2500 |
TCGv_i64 tmp = tcg_temp_new_i64(); |
2501 |
|
2502 |
tcg_gen_mul_i32(res, m1, m2); |
2503 |
tcg_gen_ext_i32_i64(res64, res); |
2504 |
tcg_gen_concat_i32_i64(tmp, |
2505 |
cpu_SR[ACCLO], cpu_SR[ACCHI]); |
2506 |
if (op == MAC16_MULA) {
|
2507 |
tcg_gen_add_i64(tmp, tmp, res64); |
2508 |
} else {
|
2509 |
tcg_gen_sub_i64(tmp, tmp, res64); |
2510 |
} |
2511 |
tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp); |
2512 |
tcg_gen_shri_i64(tmp, tmp, 32);
|
2513 |
tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp); |
2514 |
tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]); |
2515 |
|
2516 |
tcg_temp_free(res); |
2517 |
tcg_temp_free_i64(res64); |
2518 |
tcg_temp_free_i64(tmp); |
2519 |
} |
2520 |
tcg_temp_free(m1); |
2521 |
tcg_temp_free(m2); |
2522 |
} |
2523 |
if (ld_offset) {
|
2524 |
tcg_gen_mov_i32(cpu_R[RRR_S], vaddr); |
2525 |
tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32); |
2526 |
} |
2527 |
tcg_temp_free(vaddr); |
2528 |
tcg_temp_free(mem32); |
2529 |
} |
2530 |
} |
2531 |
break;
|
2532 |
|
2533 |
case 5: /*CALLN*/ |
2534 |
switch (CALL_N) {
|
2535 |
case 0: /*CALL0*/ |
2536 |
tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
|
2537 |
gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); |
2538 |
break;
|
2539 |
|
2540 |
case 1: /*CALL4w*/ |
2541 |
case 2: /*CALL8w*/ |
2542 |
case 3: /*CALL12w*/ |
2543 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
2544 |
gen_window_check1(dc, CALL_N << 2);
|
2545 |
gen_callwi(dc, CALL_N, |
2546 |
(dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); |
2547 |
break;
|
2548 |
} |
2549 |
break;
|
2550 |
|
2551 |
case 6: /*SI*/ |
2552 |
switch (CALL_N) {
|
2553 |
case 0: /*J*/ |
2554 |
gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0); |
2555 |
break;
|
2556 |
|
2557 |
case 1: /*BZ*/ |
2558 |
gen_window_check1(dc, BRI12_S); |
2559 |
{ |
2560 |
static const TCGCond cond[] = { |
2561 |
TCG_COND_EQ, /*BEQZ*/
|
2562 |
TCG_COND_NE, /*BNEZ*/
|
2563 |
TCG_COND_LT, /*BLTZ*/
|
2564 |
TCG_COND_GE, /*BGEZ*/
|
2565 |
}; |
2566 |
|
2567 |
gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0, |
2568 |
4 + BRI12_IMM12_SE);
|
2569 |
} |
2570 |
break;
|
2571 |
|
2572 |
case 2: /*BI0*/ |
2573 |
gen_window_check1(dc, BRI8_S); |
2574 |
{ |
2575 |
static const TCGCond cond[] = { |
2576 |
TCG_COND_EQ, /*BEQI*/
|
2577 |
TCG_COND_NE, /*BNEI*/
|
2578 |
TCG_COND_LT, /*BLTI*/
|
2579 |
TCG_COND_GE, /*BGEI*/
|
2580 |
}; |
2581 |
|
2582 |
gen_brcondi(dc, cond[BRI8_M & 3],
|
2583 |
cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
|
2584 |
} |
2585 |
break;
|
2586 |
|
2587 |
case 3: /*BI1*/ |
2588 |
switch (BRI8_M) {
|
2589 |
case 0: /*ENTRYw*/ |
2590 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
2591 |
{ |
2592 |
TCGv_i32 pc = tcg_const_i32(dc->pc); |
2593 |
TCGv_i32 s = tcg_const_i32(BRI12_S); |
2594 |
TCGv_i32 imm = tcg_const_i32(BRI12_IMM12); |
2595 |
gen_advance_ccount(dc); |
2596 |
gen_helper_entry(cpu_env, pc, s, imm); |
2597 |
tcg_temp_free(imm); |
2598 |
tcg_temp_free(s); |
2599 |
tcg_temp_free(pc); |
2600 |
reset_used_window(dc); |
2601 |
} |
2602 |
break;
|
2603 |
|
2604 |
case 1: /*B1*/ |
2605 |
switch (BRI8_R) {
|
2606 |
case 0: /*BFp*/ |
2607 |
case 1: /*BTp*/ |
2608 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
2609 |
{ |
2610 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2611 |
tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
|
2612 |
gen_brcondi(dc, |
2613 |
BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
|
2614 |
tmp, 0, 4 + RRI8_IMM8_SE); |
2615 |
tcg_temp_free(tmp); |
2616 |
} |
2617 |
break;
|
2618 |
|
2619 |
case 8: /*LOOP*/ |
2620 |
case 9: /*LOOPNEZ*/ |
2621 |
case 10: /*LOOPGTZ*/ |
2622 |
HAS_OPTION(XTENSA_OPTION_LOOP); |
2623 |
gen_window_check1(dc, RRI8_S); |
2624 |
{ |
2625 |
uint32_t lend = dc->pc + RRI8_IMM8 + 4;
|
2626 |
TCGv_i32 tmp = tcg_const_i32(lend); |
2627 |
|
2628 |
tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
|
2629 |
tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc); |
2630 |
gen_helper_wsr_lend(cpu_env, tmp); |
2631 |
tcg_temp_free(tmp); |
2632 |
|
2633 |
if (BRI8_R > 8) { |
2634 |
int label = gen_new_label();
|
2635 |
tcg_gen_brcondi_i32( |
2636 |
BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
|
2637 |
cpu_R[RRI8_S], 0, label);
|
2638 |
gen_jumpi(dc, lend, 1);
|
2639 |
gen_set_label(label); |
2640 |
} |
2641 |
|
2642 |
gen_jumpi(dc, dc->next_pc, 0);
|
2643 |
} |
2644 |
break;
|
2645 |
|
2646 |
default: /*reserved*/ |
2647 |
RESERVED(); |
2648 |
break;
|
2649 |
|
2650 |
} |
2651 |
break;
|
2652 |
|
2653 |
case 2: /*BLTUI*/ |
2654 |
case 3: /*BGEUI*/ |
2655 |
gen_window_check1(dc, BRI8_S); |
2656 |
gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
|
2657 |
cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
|
2658 |
break;
|
2659 |
} |
2660 |
break;
|
2661 |
|
2662 |
} |
2663 |
break;
|
2664 |
|
2665 |
case 7: /*B*/ |
2666 |
{ |
2667 |
TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
|
2668 |
|
2669 |
switch (RRI8_R & 7) { |
2670 |
case 0: /*BNONE*/ /*BANY*/ |
2671 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2672 |
{ |
2673 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2674 |
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); |
2675 |
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); |
2676 |
tcg_temp_free(tmp); |
2677 |
} |
2678 |
break;
|
2679 |
|
2680 |
case 1: /*BEQ*/ /*BNE*/ |
2681 |
case 2: /*BLT*/ /*BGE*/ |
2682 |
case 3: /*BLTU*/ /*BGEU*/ |
2683 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2684 |
{ |
2685 |
static const TCGCond cond[] = { |
2686 |
[1] = TCG_COND_EQ,
|
2687 |
[2] = TCG_COND_LT,
|
2688 |
[3] = TCG_COND_LTU,
|
2689 |
[9] = TCG_COND_NE,
|
2690 |
[10] = TCG_COND_GE,
|
2691 |
[11] = TCG_COND_GEU,
|
2692 |
}; |
2693 |
gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T], |
2694 |
4 + RRI8_IMM8_SE);
|
2695 |
} |
2696 |
break;
|
2697 |
|
2698 |
case 4: /*BALL*/ /*BNALL*/ |
2699 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2700 |
{ |
2701 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2702 |
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); |
2703 |
gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T], |
2704 |
4 + RRI8_IMM8_SE);
|
2705 |
tcg_temp_free(tmp); |
2706 |
} |
2707 |
break;
|
2708 |
|
2709 |
case 5: /*BBC*/ /*BBS*/ |
2710 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2711 |
{ |
2712 |
#ifdef TARGET_WORDS_BIGENDIAN
|
2713 |
TCGv_i32 bit = tcg_const_i32(0x80000000);
|
2714 |
#else
|
2715 |
TCGv_i32 bit = tcg_const_i32(0x00000001);
|
2716 |
#endif
|
2717 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2718 |
tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
|
2719 |
#ifdef TARGET_WORDS_BIGENDIAN
|
2720 |
tcg_gen_shr_i32(bit, bit, tmp); |
2721 |
#else
|
2722 |
tcg_gen_shl_i32(bit, bit, tmp); |
2723 |
#endif
|
2724 |
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit); |
2725 |
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); |
2726 |
tcg_temp_free(tmp); |
2727 |
tcg_temp_free(bit); |
2728 |
} |
2729 |
break;
|
2730 |
|
2731 |
case 6: /*BBCI*/ /*BBSI*/ |
2732 |
case 7: |
2733 |
gen_window_check1(dc, RRI8_S); |
2734 |
{ |
2735 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2736 |
tcg_gen_andi_i32(tmp, cpu_R[RRI8_S], |
2737 |
#ifdef TARGET_WORDS_BIGENDIAN
|
2738 |
0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T)); |
2739 |
#else
|
2740 |
0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T)); |
2741 |
#endif
|
2742 |
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); |
2743 |
tcg_temp_free(tmp); |
2744 |
} |
2745 |
break;
|
2746 |
|
2747 |
} |
2748 |
} |
2749 |
break;
|
2750 |
|
2751 |
#define gen_narrow_load_store(type) do { \ |
2752 |
TCGv_i32 addr = tcg_temp_new_i32(); \ |
2753 |
gen_window_check2(dc, RRRN_S, RRRN_T); \ |
2754 |
tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
|
2755 |
gen_load_store_alignment(dc, 2, addr, false); \ |
2756 |
tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \ |
2757 |
tcg_temp_free(addr); \ |
2758 |
} while (0) |
2759 |
|
2760 |
case 8: /*L32I.Nn*/ |
2761 |
gen_narrow_load_store(ld32u); |
2762 |
break;
|
2763 |
|
2764 |
case 9: /*S32I.Nn*/ |
2765 |
gen_narrow_load_store(st32); |
2766 |
break;
|
2767 |
#undef gen_narrow_load_store
|
2768 |
|
2769 |
case 10: /*ADD.Nn*/ |
2770 |
gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T); |
2771 |
tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]); |
2772 |
break;
|
2773 |
|
2774 |
case 11: /*ADDI.Nn*/ |
2775 |
gen_window_check2(dc, RRRN_R, RRRN_S); |
2776 |
tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
|
2777 |
break;
|
2778 |
|
2779 |
case 12: /*ST2n*/ |
2780 |
gen_window_check1(dc, RRRN_S); |
2781 |
if (RRRN_T < 8) { /*MOVI.Nn*/ |
2782 |
tcg_gen_movi_i32(cpu_R[RRRN_S], |
2783 |
RRRN_R | (RRRN_T << 4) |
|
2784 |
((RRRN_T & 6) == 6 ? 0xffffff80 : 0)); |
2785 |
} else { /*BEQZ.Nn*/ /*BNEZ.Nn*/ |
2786 |
TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
|
2787 |
|
2788 |
gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
|
2789 |
4 + (RRRN_R | ((RRRN_T & 3) << 4))); |
2790 |
} |
2791 |
break;
|
2792 |
|
2793 |
case 13: /*ST3n*/ |
2794 |
switch (RRRN_R) {
|
2795 |
case 0: /*MOV.Nn*/ |
2796 |
gen_window_check2(dc, RRRN_S, RRRN_T); |
2797 |
tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]); |
2798 |
break;
|
2799 |
|
2800 |
case 15: /*S3*/ |
2801 |
switch (RRRN_T) {
|
2802 |
case 0: /*RET.Nn*/ |
2803 |
gen_jump(dc, cpu_R[0]);
|
2804 |
break;
|
2805 |
|
2806 |
case 1: /*RETW.Nn*/ |
2807 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
2808 |
{ |
2809 |
TCGv_i32 tmp = tcg_const_i32(dc->pc); |
2810 |
gen_advance_ccount(dc); |
2811 |
gen_helper_retw(tmp, cpu_env, tmp); |
2812 |
gen_jump(dc, tmp); |
2813 |
tcg_temp_free(tmp); |
2814 |
} |
2815 |
break;
|
2816 |
|
2817 |
case 2: /*BREAK.Nn*/ |
2818 |
HAS_OPTION(XTENSA_OPTION_DEBUG); |
2819 |
if (dc->debug) {
|
2820 |
gen_debug_exception(dc, DEBUGCAUSE_BN); |
2821 |
} |
2822 |
break;
|
2823 |
|
2824 |
case 3: /*NOP.Nn*/ |
2825 |
break;
|
2826 |
|
2827 |
case 6: /*ILL.Nn*/ |
2828 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
2829 |
break;
|
2830 |
|
2831 |
default: /*reserved*/ |
2832 |
RESERVED(); |
2833 |
break;
|
2834 |
} |
2835 |
break;
|
2836 |
|
2837 |
default: /*reserved*/ |
2838 |
RESERVED(); |
2839 |
break;
|
2840 |
} |
2841 |
break;
|
2842 |
|
2843 |
default: /*reserved*/ |
2844 |
RESERVED(); |
2845 |
break;
|
2846 |
} |
2847 |
|
2848 |
if (dc->is_jmp == DISAS_NEXT) {
|
2849 |
gen_check_loop_end(dc, 0);
|
2850 |
} |
2851 |
dc->pc = dc->next_pc; |
2852 |
|
2853 |
return;
|
2854 |
|
2855 |
invalid_opcode:
|
2856 |
qemu_log("INVALID(pc = %08x)\n", dc->pc);
|
2857 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
2858 |
#undef HAS_OPTION
|
2859 |
} |
2860 |
|
2861 |
static void check_breakpoint(CPUXtensaState *env, DisasContext *dc) |
2862 |
{ |
2863 |
CPUBreakpoint *bp; |
2864 |
|
2865 |
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
|
2866 |
QTAILQ_FOREACH(bp, &env->breakpoints, entry) { |
2867 |
if (bp->pc == dc->pc) {
|
2868 |
tcg_gen_movi_i32(cpu_pc, dc->pc); |
2869 |
gen_exception(dc, EXCP_DEBUG); |
2870 |
dc->is_jmp = DISAS_UPDATE; |
2871 |
} |
2872 |
} |
2873 |
} |
2874 |
} |
2875 |
|
2876 |
static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc) |
2877 |
{ |
2878 |
unsigned i;
|
2879 |
|
2880 |
for (i = 0; i < dc->config->nibreak; ++i) { |
2881 |
if ((env->sregs[IBREAKENABLE] & (1 << i)) && |
2882 |
env->sregs[IBREAKA + i] == dc->pc) { |
2883 |
gen_debug_exception(dc, DEBUGCAUSE_IB); |
2884 |
break;
|
2885 |
} |
2886 |
} |
2887 |
} |
2888 |
|
2889 |
static void gen_intermediate_code_internal( |
2890 |
CPUXtensaState *env, TranslationBlock *tb, int search_pc)
|
2891 |
{ |
2892 |
DisasContext dc; |
2893 |
int insn_count = 0; |
2894 |
int j, lj = -1; |
2895 |
uint16_t *gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; |
2896 |
int max_insns = tb->cflags & CF_COUNT_MASK;
|
2897 |
uint32_t pc_start = tb->pc; |
2898 |
uint32_t next_page_start = |
2899 |
(pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; |
2900 |
|
2901 |
if (max_insns == 0) { |
2902 |
max_insns = CF_COUNT_MASK; |
2903 |
} |
2904 |
|
2905 |
dc.config = env->config; |
2906 |
dc.singlestep_enabled = env->singlestep_enabled; |
2907 |
dc.tb = tb; |
2908 |
dc.pc = pc_start; |
2909 |
dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK; |
2910 |
dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
|
2911 |
dc.lbeg = env->sregs[LBEG]; |
2912 |
dc.lend = env->sregs[LEND]; |
2913 |
dc.is_jmp = DISAS_NEXT; |
2914 |
dc.ccount_delta = 0;
|
2915 |
dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG; |
2916 |
dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT; |
2917 |
dc.cpenable = (tb->flags & XTENSA_TBFLAG_CPENABLE_MASK) >> |
2918 |
XTENSA_TBFLAG_CPENABLE_SHIFT; |
2919 |
|
2920 |
init_litbase(&dc); |
2921 |
init_sar_tracker(&dc); |
2922 |
reset_used_window(&dc); |
2923 |
if (dc.icount) {
|
2924 |
dc.next_icount = tcg_temp_local_new_i32(); |
2925 |
} |
2926 |
|
2927 |
gen_icount_start(); |
2928 |
|
2929 |
if (env->singlestep_enabled && env->exception_taken) {
|
2930 |
env->exception_taken = 0;
|
2931 |
tcg_gen_movi_i32(cpu_pc, dc.pc); |
2932 |
gen_exception(&dc, EXCP_DEBUG); |
2933 |
} |
2934 |
|
2935 |
do {
|
2936 |
check_breakpoint(env, &dc); |
2937 |
|
2938 |
if (search_pc) {
|
2939 |
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; |
2940 |
if (lj < j) {
|
2941 |
lj++; |
2942 |
while (lj < j) {
|
2943 |
tcg_ctx.gen_opc_instr_start[lj++] = 0;
|
2944 |
} |
2945 |
} |
2946 |
tcg_ctx.gen_opc_pc[lj] = dc.pc; |
2947 |
tcg_ctx.gen_opc_instr_start[lj] = 1;
|
2948 |
tcg_ctx.gen_opc_icount[lj] = insn_count; |
2949 |
} |
2950 |
|
2951 |
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
|
2952 |
tcg_gen_debug_insn_start(dc.pc); |
2953 |
} |
2954 |
|
2955 |
++dc.ccount_delta; |
2956 |
|
2957 |
if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { |
2958 |
gen_io_start(); |
2959 |
} |
2960 |
|
2961 |
if (dc.icount) {
|
2962 |
int label = gen_new_label();
|
2963 |
|
2964 |
tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1);
|
2965 |
tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label);
|
2966 |
tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]); |
2967 |
if (dc.debug) {
|
2968 |
gen_debug_exception(&dc, DEBUGCAUSE_IC); |
2969 |
} |
2970 |
gen_set_label(label); |
2971 |
} |
2972 |
|
2973 |
if (dc.debug) {
|
2974 |
gen_ibreak_check(env, &dc); |
2975 |
} |
2976 |
|
2977 |
disas_xtensa_insn(env, &dc); |
2978 |
++insn_count; |
2979 |
if (dc.icount) {
|
2980 |
tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount); |
2981 |
} |
2982 |
if (env->singlestep_enabled) {
|
2983 |
tcg_gen_movi_i32(cpu_pc, dc.pc); |
2984 |
gen_exception(&dc, EXCP_DEBUG); |
2985 |
break;
|
2986 |
} |
2987 |
} while (dc.is_jmp == DISAS_NEXT &&
|
2988 |
insn_count < max_insns && |
2989 |
dc.pc < next_page_start && |
2990 |
tcg_ctx.gen_opc_ptr < gen_opc_end); |
2991 |
|
2992 |
reset_litbase(&dc); |
2993 |
reset_sar_tracker(&dc); |
2994 |
if (dc.icount) {
|
2995 |
tcg_temp_free(dc.next_icount); |
2996 |
} |
2997 |
|
2998 |
if (tb->cflags & CF_LAST_IO) {
|
2999 |
gen_io_end(); |
3000 |
} |
3001 |
|
3002 |
if (dc.is_jmp == DISAS_NEXT) {
|
3003 |
gen_jumpi(&dc, dc.pc, 0);
|
3004 |
} |
3005 |
gen_icount_end(tb, insn_count); |
3006 |
*tcg_ctx.gen_opc_ptr = INDEX_op_end; |
3007 |
|
3008 |
if (!search_pc) {
|
3009 |
tb->size = dc.pc - pc_start; |
3010 |
tb->icount = insn_count; |
3011 |
} |
3012 |
} |
3013 |
|
3014 |
void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
|
3015 |
{ |
3016 |
gen_intermediate_code_internal(env, tb, 0);
|
3017 |
} |
3018 |
|
3019 |
void gen_intermediate_code_pc(CPUXtensaState *env, TranslationBlock *tb)
|
3020 |
{ |
3021 |
gen_intermediate_code_internal(env, tb, 1);
|
3022 |
} |
3023 |
|
3024 |
void cpu_dump_state(CPUXtensaState *env, FILE *f, fprintf_function cpu_fprintf,
|
3025 |
int flags)
|
3026 |
{ |
3027 |
int i, j;
|
3028 |
|
3029 |
cpu_fprintf(f, "PC=%08x\n\n", env->pc);
|
3030 |
|
3031 |
for (i = j = 0; i < 256; ++i) { |
3032 |
if (xtensa_option_bits_enabled(env->config, sregnames[i].opt_bits)) {
|
3033 |
cpu_fprintf(f, "%12s=%08x%c", sregnames[i].name, env->sregs[i],
|
3034 |
(j++ % 4) == 3 ? '\n' : ' '); |
3035 |
} |
3036 |
} |
3037 |
|
3038 |
cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n"); |
3039 |
|
3040 |
for (i = j = 0; i < 256; ++i) { |
3041 |
if (xtensa_option_bits_enabled(env->config, uregnames[i].opt_bits)) {
|
3042 |
cpu_fprintf(f, "%s=%08x%c", uregnames[i].name, env->uregs[i],
|
3043 |
(j++ % 4) == 3 ? '\n' : ' '); |
3044 |
} |
3045 |
} |
3046 |
|
3047 |
cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n"); |
3048 |
|
3049 |
for (i = 0; i < 16; ++i) { |
3050 |
cpu_fprintf(f, " A%02d=%08x%c", i, env->regs[i],
|
3051 |
(i % 4) == 3 ? '\n' : ' '); |
3052 |
} |
3053 |
|
3054 |
cpu_fprintf(f, "\n");
|
3055 |
|
3056 |
for (i = 0; i < env->config->nareg; ++i) { |
3057 |
cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
|
3058 |
(i % 4) == 3 ? '\n' : ' '); |
3059 |
} |
3060 |
|
3061 |
if (xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
|
3062 |
cpu_fprintf(f, "\n");
|
3063 |
|
3064 |
for (i = 0; i < 16; ++i) { |
3065 |
cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
|
3066 |
float32_val(env->fregs[i]), |
3067 |
*(float *)&env->fregs[i], (i % 2) == 1 ? '\n' : ' '); |
3068 |
} |
3069 |
} |
3070 |
} |
3071 |
|
3072 |
void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, int pc_pos) |
3073 |
{ |
3074 |
env->pc = tcg_ctx.gen_opc_pc[pc_pos]; |
3075 |
} |