root / target-xtensa / translate.c @ f9cb5045
History | View | Annotate | Download (84.7 kB)
1 |
/*
|
---|---|
2 |
* Xtensa ISA:
|
3 |
* http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
|
4 |
*
|
5 |
* Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
|
6 |
* All rights reserved.
|
7 |
*
|
8 |
* Redistribution and use in source and binary forms, with or without
|
9 |
* modification, are permitted provided that the following conditions are met:
|
10 |
* * Redistributions of source code must retain the above copyright
|
11 |
* notice, this list of conditions and the following disclaimer.
|
12 |
* * Redistributions in binary form must reproduce the above copyright
|
13 |
* notice, this list of conditions and the following disclaimer in the
|
14 |
* documentation and/or other materials provided with the distribution.
|
15 |
* * Neither the name of the Open Source and Linux Lab nor the
|
16 |
* names of its contributors may be used to endorse or promote products
|
17 |
* derived from this software without specific prior written permission.
|
18 |
*
|
19 |
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
20 |
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
21 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
22 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
23 |
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
24 |
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
25 |
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
26 |
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
27 |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
28 |
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
29 |
*/
|
30 |
|
31 |
#include <stdio.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"
#include "sysemu.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
43 |
|
44 |
/*
 * Per-translation-block disassembly state, threaded through all the
 * gen_* helpers while one TB is being translated.
 */
typedef struct DisasContext {
    const XtensaConfig *config;     /* core configuration being translated */
    TranslationBlock *tb;
    uint32_t pc;                    /* PC of the instruction being decoded */
    uint32_t next_pc;               /* PC of the following instruction */
    int cring;                      /* effective ring (0 when PS.EXCM set) */
    int ring;                       /* ring from PS.RING */
    uint32_t lbeg;                  /* cached zero-overhead-loop begin PC */
    uint32_t lend;                  /* cached zero-overhead-loop end PC */
    TCGv_i32 litbase;               /* masked LITBASE, valid while TB flags
                                       have XTENSA_TBFLAG_LITBASE */
    int is_jmp;                     /* DISAS_* translation-stop state */
    int singlestep_enabled;

    /* SAR value tracking, used to pick cheaper shift code sequences: */
    bool sar_5bit;                  /* SAR known to be < 32 */
    bool sar_m32_5bit;              /* SAR known to be 32 - sar_m32 */
    bool sar_m32_allocated;         /* sar_m32 temp has been allocated */
    TCGv_i32 sar_m32;

    uint32_t ccount_delta;          /* cycles not yet added to CCOUNT */
    unsigned used_window;           /* highest window quarter checked so far */

    bool debug;                     /* debug option enabled for this TB */
    bool icount;                    /* ICOUNT stepping active for this TB */
    TCGv_i32 next_icount;           /* pending ICOUNT value, committed on jump */
} DisasContext;
69 |
|
70 |
/* TCG global handles mapping guest CPU state to TCG values. */
static TCGv_ptr cpu_env;
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16];      /* AR window registers visible to insns */
static TCGv_i32 cpu_SR[256];    /* special registers (sparse, see sregnames) */
static TCGv_i32 cpu_UR[256];    /* user registers (sparse, see uregnames) */

#include "gen-icount.h"
77 |
|
78 |
/*
 * Names of the implemented special registers, indexed by SR number.
 * A NULL entry means the SR is unimplemented; gen_rsr()/gen_wsr() use
 * this table both for naming the TCG globals and as the "implemented"
 * predicate.
 */
static const char * const sregnames[256] = {
    [LBEG] = "LBEG",
    [LEND] = "LEND",
    [LCOUNT] = "LCOUNT",
    [SAR] = "SAR",
    [BR] = "BR",
    [LITBASE] = "LITBASE",
    [SCOMPARE1] = "SCOMPARE1",
    [ACCLO] = "ACCLO",
    [ACCHI] = "ACCHI",
    [MR] = "MR0",
    [MR + 1] = "MR1",
    [MR + 2] = "MR2",
    [MR + 3] = "MR3",
    [WINDOW_BASE] = "WINDOW_BASE",
    [WINDOW_START] = "WINDOW_START",
    [PTEVADDR] = "PTEVADDR",
    [RASID] = "RASID",
    [ITLBCFG] = "ITLBCFG",
    [DTLBCFG] = "DTLBCFG",
    [IBREAKENABLE] = "IBREAKENABLE",
    [IBREAKA] = "IBREAKA0",
    [IBREAKA + 1] = "IBREAKA1",
    [DBREAKA] = "DBREAKA0",
    [DBREAKA + 1] = "DBREAKA1",
    [DBREAKC] = "DBREAKC0",
    [DBREAKC + 1] = "DBREAKC1",
    [EPC1] = "EPC1",
    [EPC1 + 1] = "EPC2",
    [EPC1 + 2] = "EPC3",
    [EPC1 + 3] = "EPC4",
    [EPC1 + 4] = "EPC5",
    [EPC1 + 5] = "EPC6",
    [EPC1 + 6] = "EPC7",
    [DEPC] = "DEPC",
    [EPS2] = "EPS2",
    [EPS2 + 1] = "EPS3",
    [EPS2 + 2] = "EPS4",
    [EPS2 + 3] = "EPS5",
    [EPS2 + 4] = "EPS6",
    [EPS2 + 5] = "EPS7",
    [EXCSAVE1] = "EXCSAVE1",
    [EXCSAVE1 + 1] = "EXCSAVE2",
    [EXCSAVE1 + 2] = "EXCSAVE3",
    [EXCSAVE1 + 3] = "EXCSAVE4",
    [EXCSAVE1 + 4] = "EXCSAVE5",
    [EXCSAVE1 + 5] = "EXCSAVE6",
    [EXCSAVE1 + 6] = "EXCSAVE7",
    [CPENABLE] = "CPENABLE",
    [INTSET] = "INTSET",
    [INTCLEAR] = "INTCLEAR",
    [INTENABLE] = "INTENABLE",
    [PS] = "PS",
    [VECBASE] = "VECBASE",
    [EXCCAUSE] = "EXCCAUSE",
    [DEBUGCAUSE] = "DEBUGCAUSE",
    [CCOUNT] = "CCOUNT",
    [PRID] = "PRID",
    [ICOUNT] = "ICOUNT",
    [ICOUNTLEVEL] = "ICOUNTLEVEL",
    [EXCVADDR] = "EXCVADDR",
    [CCOMPARE] = "CCOMPARE0",
    [CCOMPARE + 1] = "CCOMPARE1",
    [CCOMPARE + 2] = "CCOMPARE2",
};
143 |
|
144 |
/*
 * Names of the implemented user registers, indexed by UR number.
 * NULL entries are unimplemented, as with sregnames above.
 */
static const char * const uregnames[256] = {
    [THREADPTR] = "THREADPTR",
    [FCR] = "FCR",
    [FSR] = "FSR",
};
149 |
|
150 |
/*
 * One-time translator setup: register the TCG globals backing the
 * guest PC, AR registers and the implemented special/user registers.
 * Only SRs/URs with a name in the tables above get a TCG global.
 */
void xtensa_translate_init(void)
{
    static const char * const regnames[] = {
        "ar0", "ar1", "ar2", "ar3",
        "ar4", "ar5", "ar6", "ar7",
        "ar8", "ar9", "ar10", "ar11",
        "ar12", "ar13", "ar14", "ar15",
    };
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
            offsetof(CPUXtensaState, pc), "pc");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUXtensaState, regs[i]),
                regnames[i]);
    }

    for (i = 0; i < 256; ++i) {
        if (sregnames[i]) {
            cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                    offsetof(CPUXtensaState, sregs[i]),
                    sregnames[i]);
        }
    }

    for (i = 0; i < 256; ++i) {
        if (uregnames[i]) {
            cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                    offsetof(CPUXtensaState, uregs[i]),
                    uregnames[i]);
        }
    }
#define GEN_HELPER 2
#include "helper.h"
}
188 |
|
189 |
/* True if any of the option bits in @opt is enabled for this core. */
static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
{
    const XtensaConfig *cfg = dc->config;

    return xtensa_option_bits_enabled(cfg, opt);
}
193 |
|
194 |
/* True if the single ISA option @opt is enabled for this core. */
static inline bool option_enabled(DisasContext *dc, int opt)
{
    const XtensaConfig *cfg = dc->config;

    return xtensa_option_enabled(cfg, opt);
}
198 |
|
199 |
/*
 * If extended L32R is active for this TB (LITBASE bit 0 set, reflected
 * in tb->flags), cache the page-aligned literal base in a local temp.
 * Uses a *local* temp because it must survive across branches in the TB.
 */
static void init_litbase(DisasContext *dc)
{
    if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
        dc->litbase = tcg_temp_local_new_i32();
        tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
    }
}
206 |
|
207 |
/* Free the literal-base temp allocated by init_litbase(), if any. */
static void reset_litbase(DisasContext *dc)
{
    if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
        tcg_temp_free(dc->litbase);
    }
}
213 |
|
214 |
/* Start a TB with no knowledge about the SAR register's value. */
static void init_sar_tracker(DisasContext *dc)
{
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
    dc->sar_m32_allocated = false;
}
220 |
|
221 |
/* Release the sar_m32 temp if gen_left_shift_sar() allocated it. */
static void reset_sar_tracker(DisasContext *dc)
{
    if (dc->sar_m32_allocated) {
        tcg_temp_free(dc->sar_m32);
    }
}
227 |
|
228 |
/*
 * Emit SAR = sa & 0x1f (SSR semantics) and record that SAR is now
 * known to be a 5-bit value, so later shifts can use it directly.
 */
static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
    if (dc->sar_m32_5bit) {
        /* The cached 32-SAR value is stale now. */
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = true;
    dc->sar_m32_5bit = false;
}
237 |
|
238 |
/*
 * Emit SAR = 32 - (sa & 0x1f) (SSL semantics), keeping the masked
 * shift amount in the persistent sar_m32 temp so funnel shifts can
 * reuse it without recomputing 32 - SAR.
 */
static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    TCGv_i32 tmp = tcg_const_i32(32);
    if (!dc->sar_m32_allocated) {
        /* Local temp: must stay valid across branches within the TB. */
        dc->sar_m32 = tcg_temp_local_new_i32();
        dc->sar_m32_allocated = true;
    }
    tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
    tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
    dc->sar_5bit = false;
    dc->sar_m32_5bit = true;
    tcg_temp_free(tmp);
}
251 |
|
252 |
/*
 * Flush the pending cycle count into CCOUNT via the helper.
 * Called before anything that can observe CCOUNT or leave the TB.
 */
static void gen_advance_ccount(DisasContext *dc)
{
    if (dc->ccount_delta > 0) {
        TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
        dc->ccount_delta = 0;
        gen_helper_advance_ccount(cpu_env, tmp);
        tcg_temp_free(tmp);
    }
}
261 |
|
262 |
/*
 * Forget which register-window quarters were already overflow-checked;
 * called after anything that may change WINDOW_BASE/WINDOW_START.
 */
static void reset_used_window(DisasContext *dc)
{
    dc->used_window = 0;
}
266 |
|
267 |
/* Emit code raising the QEMU-internal exception @excp. */
static void gen_exception(DisasContext *dc, int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_advance_ccount(dc);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free(tmp);
}
274 |
|
275 |
/*
 * Emit code raising an architectural exception with EXCCAUSE @cause
 * at the current PC.  For causes that unconditionally end execution of
 * the remaining instructions (illegal insn, syscall) the TB is closed.
 */
static void gen_exception_cause(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_exception_cause(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
            cause == SYSCALL_CAUSE) {
        dc->is_jmp = DISAS_UPDATE;
    }
}
288 |
|
289 |
/*
 * Like gen_exception_cause(), but also passes the faulting virtual
 * address (for load/store exceptions that set EXCVADDR).
 */
static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
        TCGv_i32 vaddr)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
}
299 |
|
300 |
/*
 * Emit code raising a debug exception with DEBUGCAUSE bits @cause.
 * Causes that always fire (single-step, BREAK, BREAK.N) terminate
 * the TB since nothing after them can execute.
 */
static void gen_debug_exception(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_advance_ccount(dc);
    gen_helper_debug_exception(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
        dc->is_jmp = DISAS_UPDATE;
    }
}
312 |
|
313 |
/*
 * For privileged instructions: raise PRIVILEGED_CAUSE and stop the TB
 * when translating for a non-zero (user) ring.  cring is known at
 * translation time, so no runtime check is emitted for ring 0.
 */
static void gen_check_privilege(DisasContext *dc)
{
    if (dc->cring) {
        gen_exception_cause(dc, PRIVILEGED_CAUSE);
        dc->is_jmp = DISAS_UPDATE;
    }
}
320 |
|
321 |
/*
 * Emit a jump to @dest, ending the TB.  @slot >= 0 selects a goto_tb
 * slot for direct TB chaining; slot == -1 forces a full exit (used for
 * indirect or cross-page jumps).  Pending CCOUNT/ICOUNT state is
 * committed first; under single-stepping a debug exception is raised
 * instead of chaining.
 */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    gen_advance_ccount(dc);
    if (dc->icount) {
        tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
    }
    if (dc->singlestep_enabled) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
    dc->is_jmp = DISAS_UPDATE;
}
340 |
|
341 |
/* Indirect jump: never eligible for TB chaining (slot -1). */
static void gen_jump(DisasContext *dc, TCGv dest)
{
    gen_jump_slot(dc, dest, -1);
}
345 |
|
346 |
/*
 * Jump to the immediate address @dest.  TB chaining is only allowed
 * within the same guest page, so cross-page targets drop to slot -1.
 */
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
    TCGv_i32 tmp = tcg_const_i32(dest);
    if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
        slot = -1;
    }
    gen_jump_slot(dc, tmp, slot);
    tcg_temp_free(tmp);
}
355 |
|
356 |
/*
 * Windowed call: record @callinc in PS.CALLINC, store the return
 * address (with the window-increment encoded in the top two bits)
 * into a{callinc*4}, then jump to @dest via gen_jump_slot().
 */
static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
        int slot)
{
    TCGv_i32 tcallinc = tcg_const_i32(callinc);

    tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
            tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
    tcg_temp_free(tcallinc);
    tcg_gen_movi_i32(cpu_R[callinc << 2],
            (callinc << 30) | (dc->next_pc & 0x3fffffff));
    gen_jump_slot(dc, dest, slot);
}
368 |
|
369 |
/* Indirect windowed call (CALLX4/8/12): no TB chaining. */
static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
{
    gen_callw_slot(dc, callinc, dest, -1);
}
373 |
|
374 |
/*
 * Windowed call to an immediate address; like gen_jumpi(), chaining
 * is dropped when the target lies in a different guest page.
 */
static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
{
    TCGv_i32 tmp = tcg_const_i32(dest);
    if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
        slot = -1;
    }
    gen_callw_slot(dc, callinc, tmp, slot);
    tcg_temp_free(tmp);
}
383 |
|
384 |
/*
 * Zero-overhead loop end handling: when the next PC equals the cached
 * LEND (and loops are enabled and we are not in exception mode), emit
 * "if (LCOUNT) { --LCOUNT; goto LBEG; } else fall through".
 * Returns true if loop-end code was emitted (the TB is then closed on
 * both paths), false if the caller should emit a plain jump.
 */
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
    if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
            !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
            dc->next_pc == dc->lend) {
        int label = gen_new_label();

        gen_advance_ccount(dc);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
        tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
        gen_jumpi(dc, dc->lbeg, slot);
        gen_set_label(label);
        gen_jumpi(dc, dc->next_pc, -1);
        return true;
    }
    return false;
}
401 |
|
402 |
/* Jump to the next instruction, honoring a zero-overhead loop end. */
static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
{
    if (!gen_check_loop_end(dc, slot)) {
        gen_jumpi(dc, dc->next_pc, slot);
    }
}
408 |
|
409 |
/*
 * Conditional branch: if @cond holds for (t0, t1), jump to pc+offset
 * (chain slot 1), otherwise fall through to the next instruction
 * (chain slot 0, with loop-end handling).
 */
static void gen_brcond(DisasContext *dc, TCGCond cond,
        TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
{
    int label = gen_new_label();

    gen_advance_ccount(dc);
    tcg_gen_brcond_i32(cond, t0, t1, label);
    gen_jumpi_check_loop_end(dc, 0);
    gen_set_label(label);
    gen_jumpi(dc, dc->pc + offset, 1);
}
420 |
|
421 |
/* gen_brcond() with an immediate second operand. */
static void gen_brcondi(DisasContext *dc, TCGCond cond,
        TCGv_i32 t0, uint32_t t1, uint32_t offset)
{
    TCGv_i32 tmp = tcg_const_i32(t1);
    gen_brcond(dc, cond, t0, tmp, offset);
    tcg_temp_free(tmp);
}
428 |
|
429 |
/* RSR CCOUNT: flush pending cycles first so the read is up to date. */
static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    gen_advance_ccount(dc);
    tcg_gen_mov_i32(d, cpu_SR[sr]);
}
434 |
|
435 |
/*
 * RSR PTEVADDR: compose the PTE virtual address from the PTEVADDR
 * page-table base and EXCVADDR's VPN (EXCVADDR >> 10, low bits masked).
 */
static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
    tcg_gen_or_i32(d, d, cpu_SR[sr]);
    tcg_gen_andi_i32(d, d, 0xfffffffc);
}
441 |
|
442 |
/*
 * RSR: read special register @sr into @d.  SRs with read side effects
 * dispatch through rsr_handler[]; plain SRs are copied directly.
 * Unimplemented SRs (no name in sregnames[]) are only logged.
 */
static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    static void (* const rsr_handler[256])(DisasContext *dc,
            TCGv_i32 d, uint32_t sr) = {
        [CCOUNT] = gen_rsr_ccount,
        [PTEVADDR] = gen_rsr_ptevaddr,
    };

    if (sregnames[sr]) {
        if (rsr_handler[sr]) {
            rsr_handler[sr](dc, d, sr);
        } else {
            tcg_gen_mov_i32(d, cpu_SR[sr]);
        }
    } else {
        /* Fixed format specifier: sr is uint32_t, so use PRIu32
         * instead of %d to keep the vararg types matched. */
        qemu_log("RSR %" PRIu32 " not implemented, ", sr);
    }
}
460 |
|
461 |
/*
 * WSR LBEG goes through a helper (it affects cached loop state) and
 * ends the TB, since the translator caches lbeg/lend per TB.
 */
static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lbeg(cpu_env, s);
    gen_jumpi_check_loop_end(dc, 0);
}
466 |
|
467 |
/* WSR LEND: same treatment as LBEG — helper plus TB exit. */
static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    gen_helper_wsr_lend(cpu_env, s);
    gen_jumpi_check_loop_end(dc, 0);
}
472 |
|
473 |
/*
 * WSR SAR: mask to 6 bits and invalidate all translation-time
 * knowledge about SAR (it may now be any value up to 63).
 */
static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
}
482 |
|
483 |
/* WSR BR: only the 16 boolean flag bits are writable. */
static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
}
487 |
|
488 |
/*
 * WSR LITBASE: keep the base address (bits 31..12) and the enable
 * bit (bit 0).  The enable bit is mirrored into tb->flags, so the
 * TB must be exited.
 */
static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
494 |
|
495 |
/* WSR ACCHI: the MAC16 accumulator high part is 8 bits, sign-extended. */
static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_ext8s_i32(cpu_SR[sr], s);
}
499 |
|
500 |
/*
 * WSR WINDOW_BASE: the helper rotates the physical register file;
 * previously performed window checks are no longer valid.
 */
static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_windowbase(cpu_env, v);
    reset_used_window(dc);
}
505 |
|
506 |
/*
 * WSR WINDOW_START: mask to one valid bit per 4-register window
 * (nareg/4 bits) and invalidate cached window checks.
 */
static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
    reset_used_window(dc);
}
511 |
|
512 |
/* WSR PTEVADDR: only the page-table base (top 10 bits) is writable. */
static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
}
516 |
|
517 |
/*
 * WSR RASID: helper updates the MMU ring ASIDs; the resulting mapping
 * change is reflected in tb->flags, so the TB must be exited.
 */
static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_rasid(cpu_env, v);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
523 |
|
524 |
/* WSR ITLBCFG/DTLBCFG: keep only the writable way-configuration bits. */
static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
}
528 |
|
529 |
/*
 * WSR IBREAKENABLE: helper updates instruction-breakpoint state;
 * exit the TB so new breakpoints take effect immediately.
 */
static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_ibreakenable(cpu_env, v);
    gen_jumpi_check_loop_end(dc, 0);
}
534 |
|
535 |
/*
 * WSR IBREAKA[n]: write an instruction-breakpoint address.  Writes to
 * registers beyond the configured nibreak count are silently ignored.
 * The TB is exited so the new breakpoint address is honored.
 */
static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - IBREAKA;

    if (id < dc->config->nibreak) {
        TCGv_i32 tmp = tcg_const_i32(id);
        gen_helper_wsr_ibreaka(cpu_env, tmp, v);
        tcg_temp_free(tmp);
        gen_jumpi_check_loop_end(dc, 0);
    }
}
546 |
|
547 |
/*
 * WSR DBREAKA[n]: write a data-breakpoint address via the helper.
 * Out-of-range ids (>= ndbreak) are silently ignored.
 */
static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - DBREAKA;

    if (id < dc->config->ndbreak) {
        TCGv_i32 tmp = tcg_const_i32(id);
        gen_helper_wsr_dbreaka(cpu_env, tmp, v);
        tcg_temp_free(tmp);
    }
}
557 |
|
558 |
/*
 * WSR DBREAKC[n]: write a data-breakpoint control register via the
 * helper.  Out-of-range ids (>= ndbreak) are silently ignored.
 */
static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - DBREAKC;

    if (id < dc->config->ndbreak) {
        TCGv_i32 tmp = tcg_const_i32(id);
        gen_helper_wsr_dbreakc(cpu_env, tmp, v);
        tcg_temp_free(tmp);
    }
}
568 |
|
569 |
/*
 * WSR INTSET: only software-interrupt bits are directly settable.
 * Re-evaluate pending interrupts and exit the TB so they can be taken.
 */
static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v,
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
576 |
|
577 |
/*
 * WSR INTCLEAR: clear the requested INTSET bits, but only for interrupt
 * types that are software-clearable (edge, NMI, software); level-
 * triggered interrupts cannot be cleared this way.
 */
static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, v,
            dc->config->inttype_mask[INTTYPE_EDGE] |
            dc->config->inttype_mask[INTTYPE_NMI] |
            dc->config->inttype_mask[INTTYPE_SOFTWARE]);
    tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
    tcg_temp_free(tmp);
    gen_helper_check_interrupts(cpu_env);
}
589 |
|
590 |
/*
 * WSR INTENABLE: store the new mask, re-evaluate pending interrupts,
 * and exit the TB so a newly enabled interrupt can be taken.
 */
static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_mov_i32(cpu_SR[sr], v);
    gen_helper_check_interrupts(cpu_env);
    gen_jumpi_check_loop_end(dc, 0);
}
596 |
|
597 |
/*
 * WSR PS: mask to the implemented PS fields (RING only exists with the
 * MMU option), drop cached window checks, re-check interrupts, and
 * exit the TB — PS affects the mmu index and tb->flags.
 */
static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
        PS_UM | PS_EXCM | PS_INTLEVEL;

    if (option_enabled(dc, XTENSA_OPTION_MMU)) {
        mask |= PS_RING;
    }
    tcg_gen_andi_i32(cpu_SR[sr], v, mask);
    reset_used_window(dc);
    gen_helper_check_interrupts(cpu_env);
    /* This can change mmu index and tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
611 |
|
612 |
/* WSR DEBUGCAUSE: intentionally a no-op — the register is read-only. */
static void gen_wsr_debugcause(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
}
615 |
|
616 |
/* WSR PRID: intentionally a no-op — the processor ID is read-only. */
static void gen_wsr_prid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
}
619 |
|
620 |
/*
 * WSR ICOUNT: while ICOUNT stepping is active in this TB, stage the
 * value in next_icount (committed by gen_jump_slot()) so the in-flight
 * per-instruction increment logic is not disturbed; otherwise write
 * the SR directly.
 */
static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    if (dc->icount) {
        tcg_gen_mov_i32(dc->next_icount, v);
    } else {
        tcg_gen_mov_i32(cpu_SR[sr], v);
    }
}
628 |
|
629 |
/*
 * WSR ICOUNTLEVEL: 4-bit field; mirrored into tb->flags, so the TB
 * must be exited for the new level to take effect.
 */
static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
    /* This can change tb->flags, so exit tb */
    gen_jumpi_check_loop_end(dc, -1);
}
} |
635 |
|
636 |
/*
 * WSR CCOMPARE[n]: writing a compare value clears the corresponding
 * timer interrupt bit in INTSET and re-evaluates pending interrupts.
 * Writes beyond the configured nccompare count are ignored.
 */
static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t id = sr - CCOMPARE;
    if (id < dc->config->nccompare) {
        uint32_t int_bit = 1 << dc->config->timerint[id];
        gen_advance_ccount(dc);
        tcg_gen_mov_i32(cpu_SR[sr], v);
        tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
        gen_helper_check_interrupts(cpu_env);
    }
}
647 |
|
648 |
/*
 * WSR: write @s to special register @sr.  SRs with side effects or
 * write masks dispatch through wsr_handler[]; plain SRs are written
 * directly.  Unimplemented SRs (no name in sregnames[]) are only
 * logged.
 */
static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    static void (* const wsr_handler[256])(DisasContext *dc,
            uint32_t sr, TCGv_i32 v) = {
        [LBEG] = gen_wsr_lbeg,
        [LEND] = gen_wsr_lend,
        [SAR] = gen_wsr_sar,
        [BR] = gen_wsr_br,
        [LITBASE] = gen_wsr_litbase,
        [ACCHI] = gen_wsr_acchi,
        [WINDOW_BASE] = gen_wsr_windowbase,
        [WINDOW_START] = gen_wsr_windowstart,
        [PTEVADDR] = gen_wsr_ptevaddr,
        [RASID] = gen_wsr_rasid,
        [ITLBCFG] = gen_wsr_tlbcfg,
        [DTLBCFG] = gen_wsr_tlbcfg,
        [IBREAKENABLE] = gen_wsr_ibreakenable,
        [IBREAKA] = gen_wsr_ibreaka,
        [IBREAKA + 1] = gen_wsr_ibreaka,
        [DBREAKA] = gen_wsr_dbreaka,
        [DBREAKA + 1] = gen_wsr_dbreaka,
        [DBREAKC] = gen_wsr_dbreakc,
        [DBREAKC + 1] = gen_wsr_dbreakc,
        [INTSET] = gen_wsr_intset,
        [INTCLEAR] = gen_wsr_intclear,
        [INTENABLE] = gen_wsr_intenable,
        [PS] = gen_wsr_ps,
        [DEBUGCAUSE] = gen_wsr_debugcause,
        [PRID] = gen_wsr_prid,
        [ICOUNT] = gen_wsr_icount,
        [ICOUNTLEVEL] = gen_wsr_icountlevel,
        [CCOMPARE] = gen_wsr_ccompare,
        [CCOMPARE + 1] = gen_wsr_ccompare,
        [CCOMPARE + 2] = gen_wsr_ccompare,
    };

    if (sregnames[sr]) {
        if (wsr_handler[sr]) {
            wsr_handler[sr](dc, sr, s);
        } else {
            tcg_gen_mov_i32(cpu_SR[sr], s);
        }
    } else {
        /* Fixed format specifier: sr is uint32_t, so use PRIu32
         * instead of %d to keep the vararg types matched. */
        qemu_log("WSR %" PRIu32 " not implemented, ", sr);
    }
}
694 |
|
695 |
/*
 * Enforce load/store alignment for a (1 << shift)-byte access at @addr.
 * Without the unaligned-exception option the low bits are silently
 * masked off; with it (and no hardware alignment, or the insn forbids
 * it) a LoadStoreAlignment exception is raised at runtime when any of
 * the low bits are set.  May modify @addr in place (masking case).
 */
static void gen_load_store_alignment(DisasContext *dc, int shift,
        TCGv_i32 addr, bool no_hw_alignment)
{
    if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
        tcg_gen_andi_i32(addr, addr, ~0 << shift);
    } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
            no_hw_alignment) {
        int label = gen_new_label();
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
        gen_set_label(label);
        tcg_temp_free(tmp);
    }
}
711 |
|
712 |
/*
 * WAITI: flush pending cycles, then let the helper set PS.INTLEVEL to
 * @imm4 and halt the CPU until an interrupt; resumption PC is next_pc.
 */
static void gen_waiti(DisasContext *dc, uint32_t imm4)
{
    TCGv_i32 pc = tcg_const_i32(dc->next_pc);
    TCGv_i32 intlevel = tcg_const_i32(imm4);
    gen_advance_ccount(dc);
    gen_helper_waiti(cpu_env, pc, intlevel);
    tcg_temp_free(pc);
    tcg_temp_free(intlevel);
}
721 |
|
722 |
/*
 * Ensure AR register @r1 is resident in the current window: emit a
 * window-overflow check for its 4-register quarter unless an equal or
 * higher quarter was already checked in this TB (used_window caches
 * the high-water mark).  Skipped entirely in exception mode or when
 * windowed registers are not configured.
 */
static void gen_window_check1(DisasContext *dc, unsigned r1)
{
    if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
        return;
    }
    if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
            r1 / 4 > dc->used_window) {
        TCGv_i32 pc = tcg_const_i32(dc->pc);
        TCGv_i32 w = tcg_const_i32(r1 / 4);

        dc->used_window = r1 / 4;
        gen_advance_ccount(dc);
        gen_helper_window_check(cpu_env, pc, w);

        tcg_temp_free(w);
        tcg_temp_free(pc);
    }
}
740 |
|
741 |
/* Window-check the higher-numbered of two AR registers. */
static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
{
    unsigned hi = r2;

    if (r1 > r2) {
        hi = r1;
    }
    gen_window_check1(dc, hi);
}
745 |
|
746 |
/* Window-check the highest-numbered of three AR registers. */
static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
        unsigned r3)
{
    unsigned hi23 = (r2 > r3) ? r2 : r3;

    gen_window_check2(dc, r1, hi23);
}
751 |
|
752 |
/*
 * Extract a MAC16 operand half-word from @v into a fresh temp:
 * the high half (@hi) via a 16-bit shift, or the low half via a
 * 16-bit extension; @is_unsigned selects zero vs sign treatment.
 * Caller frees the returned temp.
 */
static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
{
    TCGv_i32 half = tcg_temp_new_i32();

    if (hi) {
        if (is_unsigned) {
            tcg_gen_shri_i32(half, v, 16);
        } else {
            tcg_gen_sari_i32(half, v, 16);
        }
    } else {
        if (is_unsigned) {
            tcg_gen_ext16u_i32(half, v);
        } else {
            tcg_gen_ext16s_i32(half, v);
        }
    }
    return half;
}
763 |
|
764 |
static void disas_xtensa_insn(DisasContext *dc) |
765 |
{ |
766 |
#define HAS_OPTION_BITS(opt) do { \ |
767 |
if (!option_bits_enabled(dc, opt)) { \
|
768 |
qemu_log("Option is not enabled %s:%d\n", \
|
769 |
__FILE__, __LINE__); \ |
770 |
goto invalid_opcode; \
|
771 |
} \ |
772 |
} while (0) |
773 |
|
774 |
#define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
|
775 |
|
776 |
#define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__) |
777 |
#define RESERVED() do { \ |
778 |
qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
|
779 |
dc->pc, b0, b1, b2, __FILE__, __LINE__); \ |
780 |
goto invalid_opcode; \
|
781 |
} while (0) |
782 |
|
783 |
|
784 |
#ifdef TARGET_WORDS_BIGENDIAN
|
785 |
#define OP0 (((b0) & 0xf0) >> 4) |
786 |
#define OP1 (((b2) & 0xf0) >> 4) |
787 |
#define OP2 ((b2) & 0xf) |
788 |
#define RRR_R ((b1) & 0xf) |
789 |
#define RRR_S (((b1) & 0xf0) >> 4) |
790 |
#define RRR_T ((b0) & 0xf) |
791 |
#else
|
792 |
#define OP0 (((b0) & 0xf)) |
793 |
#define OP1 (((b2) & 0xf)) |
794 |
#define OP2 (((b2) & 0xf0) >> 4) |
795 |
#define RRR_R (((b1) & 0xf0) >> 4) |
796 |
#define RRR_S (((b1) & 0xf)) |
797 |
#define RRR_T (((b0) & 0xf0) >> 4) |
798 |
#endif
|
799 |
#define RRR_X ((RRR_R & 0x4) >> 2) |
800 |
#define RRR_Y ((RRR_T & 0x4) >> 2) |
801 |
#define RRR_W (RRR_R & 0x3) |
802 |
|
803 |
#define RRRN_R RRR_R
|
804 |
#define RRRN_S RRR_S
|
805 |
#define RRRN_T RRR_T
|
806 |
|
807 |
#define RRI8_R RRR_R
|
808 |
#define RRI8_S RRR_S
|
809 |
#define RRI8_T RRR_T
|
810 |
#define RRI8_IMM8 (b2)
|
811 |
#define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8) |
812 |
|
813 |
#ifdef TARGET_WORDS_BIGENDIAN
|
814 |
#define RI16_IMM16 (((b1) << 8) | (b2)) |
815 |
#else
|
816 |
#define RI16_IMM16 (((b2) << 8) | (b1)) |
817 |
#endif
|
818 |
|
819 |
#ifdef TARGET_WORDS_BIGENDIAN
|
820 |
#define CALL_N (((b0) & 0xc) >> 2) |
821 |
#define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2)) |
822 |
#else
|
823 |
#define CALL_N (((b0) & 0x30) >> 4) |
824 |
#define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10)) |
825 |
#endif
|
826 |
#define CALL_OFFSET_SE \
|
827 |
(((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET) |
828 |
|
829 |
#define CALLX_N CALL_N
|
830 |
#ifdef TARGET_WORDS_BIGENDIAN
|
831 |
#define CALLX_M ((b0) & 0x3) |
832 |
#else
|
833 |
#define CALLX_M (((b0) & 0xc0) >> 6) |
834 |
#endif
|
835 |
#define CALLX_S RRR_S
|
836 |
|
837 |
#define BRI12_M CALLX_M
|
838 |
#define BRI12_S RRR_S
|
839 |
#ifdef TARGET_WORDS_BIGENDIAN
|
840 |
#define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2)) |
841 |
#else
|
842 |
#define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4)) |
843 |
#endif
|
844 |
#define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12) |
845 |
|
846 |
#define BRI8_M BRI12_M
|
847 |
#define BRI8_R RRI8_R
|
848 |
#define BRI8_S RRI8_S
|
849 |
#define BRI8_IMM8 RRI8_IMM8
|
850 |
#define BRI8_IMM8_SE RRI8_IMM8_SE
|
851 |
|
852 |
#define RSR_SR (b1)
|
853 |
|
854 |
uint8_t b0 = cpu_ldub_code(cpu_single_env, dc->pc); |
855 |
uint8_t b1 = cpu_ldub_code(cpu_single_env, dc->pc + 1);
|
856 |
uint8_t b2 = 0;
|
857 |
|
858 |
static const uint32_t B4CONST[] = { |
859 |
0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 |
860 |
}; |
861 |
|
862 |
static const uint32_t B4CONSTU[] = { |
863 |
32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 |
864 |
}; |
865 |
|
866 |
if (OP0 >= 8) { |
867 |
dc->next_pc = dc->pc + 2;
|
868 |
HAS_OPTION(XTENSA_OPTION_CODE_DENSITY); |
869 |
} else {
|
870 |
dc->next_pc = dc->pc + 3;
|
871 |
b2 = cpu_ldub_code(cpu_single_env, dc->pc + 2);
|
872 |
} |
873 |
|
874 |
switch (OP0) {
|
875 |
case 0: /*QRST*/ |
876 |
switch (OP1) {
|
877 |
case 0: /*RST0*/ |
878 |
switch (OP2) {
|
879 |
case 0: /*ST0*/ |
880 |
if ((RRR_R & 0xc) == 0x8) { |
881 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
882 |
} |
883 |
|
884 |
switch (RRR_R) {
|
885 |
case 0: /*SNM0*/ |
886 |
switch (CALLX_M) {
|
887 |
case 0: /*ILL*/ |
888 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
889 |
break;
|
890 |
|
891 |
case 1: /*reserved*/ |
892 |
RESERVED(); |
893 |
break;
|
894 |
|
895 |
case 2: /*JR*/ |
896 |
switch (CALLX_N) {
|
897 |
case 0: /*RET*/ |
898 |
case 2: /*JX*/ |
899 |
gen_window_check1(dc, CALLX_S); |
900 |
gen_jump(dc, cpu_R[CALLX_S]); |
901 |
break;
|
902 |
|
903 |
case 1: /*RETWw*/ |
904 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
905 |
{ |
906 |
TCGv_i32 tmp = tcg_const_i32(dc->pc); |
907 |
gen_advance_ccount(dc); |
908 |
gen_helper_retw(tmp, cpu_env, tmp); |
909 |
gen_jump(dc, tmp); |
910 |
tcg_temp_free(tmp); |
911 |
} |
912 |
break;
|
913 |
|
914 |
case 3: /*reserved*/ |
915 |
RESERVED(); |
916 |
break;
|
917 |
} |
918 |
break;
|
919 |
|
920 |
case 3: /*CALLX*/ |
921 |
gen_window_check2(dc, CALLX_S, CALLX_N << 2);
|
922 |
switch (CALLX_N) {
|
923 |
case 0: /*CALLX0*/ |
924 |
{ |
925 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
926 |
tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); |
927 |
tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
|
928 |
gen_jump(dc, tmp); |
929 |
tcg_temp_free(tmp); |
930 |
} |
931 |
break;
|
932 |
|
933 |
case 1: /*CALLX4w*/ |
934 |
case 2: /*CALLX8w*/ |
935 |
case 3: /*CALLX12w*/ |
936 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
937 |
{ |
938 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
939 |
|
940 |
tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); |
941 |
gen_callw(dc, CALLX_N, tmp); |
942 |
tcg_temp_free(tmp); |
943 |
} |
944 |
break;
|
945 |
} |
946 |
break;
|
947 |
} |
948 |
break;
|
949 |
|
950 |
case 1: /*MOVSPw*/ |
951 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
952 |
gen_window_check2(dc, RRR_T, RRR_S); |
953 |
{ |
954 |
TCGv_i32 pc = tcg_const_i32(dc->pc); |
955 |
gen_advance_ccount(dc); |
956 |
gen_helper_movsp(cpu_env, pc); |
957 |
tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]); |
958 |
tcg_temp_free(pc); |
959 |
} |
960 |
break;
|
961 |
|
962 |
case 2: /*SYNC*/ |
963 |
switch (RRR_T) {
|
964 |
case 0: /*ISYNC*/ |
965 |
break;
|
966 |
|
967 |
case 1: /*RSYNC*/ |
968 |
break;
|
969 |
|
970 |
case 2: /*ESYNC*/ |
971 |
break;
|
972 |
|
973 |
case 3: /*DSYNC*/ |
974 |
break;
|
975 |
|
976 |
case 8: /*EXCW*/ |
977 |
HAS_OPTION(XTENSA_OPTION_EXCEPTION); |
978 |
break;
|
979 |
|
980 |
case 12: /*MEMW*/ |
981 |
break;
|
982 |
|
983 |
case 13: /*EXTW*/ |
984 |
break;
|
985 |
|
986 |
case 15: /*NOP*/ |
987 |
break;
|
988 |
|
989 |
default: /*reserved*/ |
990 |
RESERVED(); |
991 |
break;
|
992 |
} |
993 |
break;
|
994 |
|
995 |
case 3: /*RFEIx*/ |
996 |
switch (RRR_T) {
|
997 |
case 0: /*RFETx*/ |
998 |
HAS_OPTION(XTENSA_OPTION_EXCEPTION); |
999 |
switch (RRR_S) {
|
1000 |
case 0: /*RFEx*/ |
1001 |
gen_check_privilege(dc); |
1002 |
tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); |
1003 |
gen_helper_check_interrupts(cpu_env); |
1004 |
gen_jump(dc, cpu_SR[EPC1]); |
1005 |
break;
|
1006 |
|
1007 |
case 1: /*RFUEx*/ |
1008 |
RESERVED(); |
1009 |
break;
|
1010 |
|
1011 |
case 2: /*RFDEx*/ |
1012 |
gen_check_privilege(dc); |
1013 |
gen_jump(dc, cpu_SR[ |
1014 |
dc->config->ndepc ? DEPC : EPC1]); |
1015 |
break;
|
1016 |
|
1017 |
case 4: /*RFWOw*/ |
1018 |
case 5: /*RFWUw*/ |
1019 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1020 |
gen_check_privilege(dc); |
1021 |
{ |
1022 |
TCGv_i32 tmp = tcg_const_i32(1);
|
1023 |
|
1024 |
tcg_gen_andi_i32( |
1025 |
cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); |
1026 |
tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); |
1027 |
|
1028 |
if (RRR_S == 4) { |
1029 |
tcg_gen_andc_i32(cpu_SR[WINDOW_START], |
1030 |
cpu_SR[WINDOW_START], tmp); |
1031 |
} else {
|
1032 |
tcg_gen_or_i32(cpu_SR[WINDOW_START], |
1033 |
cpu_SR[WINDOW_START], tmp); |
1034 |
} |
1035 |
|
1036 |
gen_helper_restore_owb(cpu_env); |
1037 |
gen_helper_check_interrupts(cpu_env); |
1038 |
gen_jump(dc, cpu_SR[EPC1]); |
1039 |
|
1040 |
tcg_temp_free(tmp); |
1041 |
} |
1042 |
break;
|
1043 |
|
1044 |
default: /*reserved*/ |
1045 |
RESERVED(); |
1046 |
break;
|
1047 |
} |
1048 |
break;
|
1049 |
|
1050 |
case 1: /*RFIx*/ |
1051 |
HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT); |
1052 |
if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) { |
1053 |
gen_check_privilege(dc); |
1054 |
tcg_gen_mov_i32(cpu_SR[PS], |
1055 |
cpu_SR[EPS2 + RRR_S - 2]);
|
1056 |
gen_helper_check_interrupts(cpu_env); |
1057 |
gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
|
1058 |
} else {
|
1059 |
qemu_log("RFI %d is illegal\n", RRR_S);
|
1060 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
1061 |
} |
1062 |
break;
|
1063 |
|
1064 |
case 2: /*RFME*/ |
1065 |
TBD(); |
1066 |
break;
|
1067 |
|
1068 |
default: /*reserved*/ |
1069 |
RESERVED(); |
1070 |
break;
|
1071 |
|
1072 |
} |
1073 |
break;
|
1074 |
|
1075 |
case 4: /*BREAKx*/ |
1076 |
HAS_OPTION(XTENSA_OPTION_DEBUG); |
1077 |
if (dc->debug) {
|
1078 |
gen_debug_exception(dc, DEBUGCAUSE_BI); |
1079 |
} |
1080 |
break;
|
1081 |
|
1082 |
case 5: /*SYSCALLx*/ |
1083 |
HAS_OPTION(XTENSA_OPTION_EXCEPTION); |
1084 |
switch (RRR_S) {
|
1085 |
case 0: /*SYSCALLx*/ |
1086 |
gen_exception_cause(dc, SYSCALL_CAUSE); |
1087 |
break;
|
1088 |
|
1089 |
case 1: /*SIMCALL*/ |
1090 |
if (semihosting_enabled) {
|
1091 |
gen_check_privilege(dc); |
1092 |
gen_helper_simcall(cpu_env); |
1093 |
} else {
|
1094 |
qemu_log("SIMCALL but semihosting is disabled\n");
|
1095 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
1096 |
} |
1097 |
break;
|
1098 |
|
1099 |
default:
|
1100 |
RESERVED(); |
1101 |
break;
|
1102 |
} |
1103 |
break;
|
1104 |
|
1105 |
case 6: /*RSILx*/ |
1106 |
HAS_OPTION(XTENSA_OPTION_INTERRUPT); |
1107 |
gen_check_privilege(dc); |
1108 |
gen_window_check1(dc, RRR_T); |
1109 |
tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]); |
1110 |
tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL); |
1111 |
tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S); |
1112 |
gen_helper_check_interrupts(cpu_env); |
1113 |
gen_jumpi_check_loop_end(dc, 0);
|
1114 |
break;
|
1115 |
|
1116 |
case 7: /*WAITIx*/ |
1117 |
HAS_OPTION(XTENSA_OPTION_INTERRUPT); |
1118 |
gen_check_privilege(dc); |
1119 |
gen_waiti(dc, RRR_S); |
1120 |
break;
|
1121 |
|
1122 |
case 8: /*ANY4p*/ |
1123 |
case 9: /*ALL4p*/ |
1124 |
case 10: /*ANY8p*/ |
1125 |
case 11: /*ALL8p*/ |
1126 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
1127 |
{ |
1128 |
const unsigned shift = (RRR_R & 2) ? 8 : 4; |
1129 |
TCGv_i32 mask = tcg_const_i32( |
1130 |
((1 << shift) - 1) << RRR_S); |
1131 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1132 |
|
1133 |
tcg_gen_and_i32(tmp, cpu_SR[BR], mask); |
1134 |
if (RRR_R & 1) { /*ALL*/ |
1135 |
tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
|
1136 |
} else { /*ANY*/ |
1137 |
tcg_gen_add_i32(tmp, tmp, mask); |
1138 |
} |
1139 |
tcg_gen_shri_i32(tmp, tmp, RRR_S + shift); |
1140 |
tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], |
1141 |
tmp, RRR_T, 1);
|
1142 |
tcg_temp_free(mask); |
1143 |
tcg_temp_free(tmp); |
1144 |
} |
1145 |
break;
|
1146 |
|
1147 |
default: /*reserved*/ |
1148 |
RESERVED(); |
1149 |
break;
|
1150 |
|
1151 |
} |
1152 |
break;
|
1153 |
|
1154 |
case 1: /*AND*/ |
1155 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1156 |
tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1157 |
break;
|
1158 |
|
1159 |
case 2: /*OR*/ |
1160 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1161 |
tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1162 |
break;
|
1163 |
|
1164 |
case 3: /*XOR*/ |
1165 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1166 |
tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1167 |
break;
|
1168 |
|
1169 |
case 4: /*ST1*/ |
1170 |
switch (RRR_R) {
|
1171 |
case 0: /*SSR*/ |
1172 |
gen_window_check1(dc, RRR_S); |
1173 |
gen_right_shift_sar(dc, cpu_R[RRR_S]); |
1174 |
break;
|
1175 |
|
1176 |
case 1: /*SSL*/ |
1177 |
gen_window_check1(dc, RRR_S); |
1178 |
gen_left_shift_sar(dc, cpu_R[RRR_S]); |
1179 |
break;
|
1180 |
|
1181 |
case 2: /*SSA8L*/ |
1182 |
gen_window_check1(dc, RRR_S); |
1183 |
{ |
1184 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1185 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
|
1186 |
gen_right_shift_sar(dc, tmp); |
1187 |
tcg_temp_free(tmp); |
1188 |
} |
1189 |
break;
|
1190 |
|
1191 |
case 3: /*SSA8B*/ |
1192 |
gen_window_check1(dc, RRR_S); |
1193 |
{ |
1194 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1195 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
|
1196 |
gen_left_shift_sar(dc, tmp); |
1197 |
tcg_temp_free(tmp); |
1198 |
} |
1199 |
break;
|
1200 |
|
1201 |
case 4: /*SSAI*/ |
1202 |
{ |
1203 |
TCGv_i32 tmp = tcg_const_i32( |
1204 |
RRR_S | ((RRR_T & 1) << 4)); |
1205 |
gen_right_shift_sar(dc, tmp); |
1206 |
tcg_temp_free(tmp); |
1207 |
} |
1208 |
break;
|
1209 |
|
1210 |
case 6: /*RER*/ |
1211 |
TBD(); |
1212 |
break;
|
1213 |
|
1214 |
case 7: /*WER*/ |
1215 |
TBD(); |
1216 |
break;
|
1217 |
|
1218 |
case 8: /*ROTWw*/ |
1219 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1220 |
gen_check_privilege(dc); |
1221 |
{ |
1222 |
TCGv_i32 tmp = tcg_const_i32( |
1223 |
RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0)); |
1224 |
gen_helper_rotw(cpu_env, tmp); |
1225 |
tcg_temp_free(tmp); |
1226 |
reset_used_window(dc); |
1227 |
} |
1228 |
break;
|
1229 |
|
1230 |
case 14: /*NSAu*/ |
1231 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); |
1232 |
gen_window_check2(dc, RRR_S, RRR_T); |
1233 |
gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]); |
1234 |
break;
|
1235 |
|
1236 |
case 15: /*NSAUu*/ |
1237 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); |
1238 |
gen_window_check2(dc, RRR_S, RRR_T); |
1239 |
gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]); |
1240 |
break;
|
1241 |
|
1242 |
default: /*reserved*/ |
1243 |
RESERVED(); |
1244 |
break;
|
1245 |
} |
1246 |
break;
|
1247 |
|
1248 |
case 5: /*TLB*/ |
1249 |
HAS_OPTION_BITS( |
1250 |
XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) | |
1251 |
XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | |
1252 |
XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION)); |
1253 |
gen_check_privilege(dc); |
1254 |
gen_window_check2(dc, RRR_S, RRR_T); |
1255 |
{ |
1256 |
TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0); |
1257 |
|
1258 |
switch (RRR_R & 7) { |
1259 |
case 3: /*RITLB0*/ /*RDTLB0*/ |
1260 |
gen_helper_rtlb0(cpu_R[RRR_T], |
1261 |
cpu_env, cpu_R[RRR_S], dtlb); |
1262 |
break;
|
1263 |
|
1264 |
case 4: /*IITLB*/ /*IDTLB*/ |
1265 |
gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb); |
1266 |
/* This could change memory mapping, so exit tb */
|
1267 |
gen_jumpi_check_loop_end(dc, -1);
|
1268 |
break;
|
1269 |
|
1270 |
case 5: /*PITLB*/ /*PDTLB*/ |
1271 |
tcg_gen_movi_i32(cpu_pc, dc->pc); |
1272 |
gen_helper_ptlb(cpu_R[RRR_T], |
1273 |
cpu_env, cpu_R[RRR_S], dtlb); |
1274 |
break;
|
1275 |
|
1276 |
case 6: /*WITLB*/ /*WDTLB*/ |
1277 |
gen_helper_wtlb( |
1278 |
cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb); |
1279 |
/* This could change memory mapping, so exit tb */
|
1280 |
gen_jumpi_check_loop_end(dc, -1);
|
1281 |
break;
|
1282 |
|
1283 |
case 7: /*RITLB1*/ /*RDTLB1*/ |
1284 |
gen_helper_rtlb1(cpu_R[RRR_T], |
1285 |
cpu_env, cpu_R[RRR_S], dtlb); |
1286 |
break;
|
1287 |
|
1288 |
default:
|
1289 |
tcg_temp_free(dtlb); |
1290 |
RESERVED(); |
1291 |
break;
|
1292 |
} |
1293 |
tcg_temp_free(dtlb); |
1294 |
} |
1295 |
break;
|
1296 |
|
1297 |
case 6: /*RT0*/ |
1298 |
gen_window_check2(dc, RRR_R, RRR_T); |
1299 |
switch (RRR_S) {
|
1300 |
case 0: /*NEG*/ |
1301 |
tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]); |
1302 |
break;
|
1303 |
|
1304 |
case 1: /*ABS*/ |
1305 |
{ |
1306 |
int label = gen_new_label();
|
1307 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]); |
1308 |
tcg_gen_brcondi_i32( |
1309 |
TCG_COND_GE, cpu_R[RRR_R], 0, label);
|
1310 |
tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]); |
1311 |
gen_set_label(label); |
1312 |
} |
1313 |
break;
|
1314 |
|
1315 |
default: /*reserved*/ |
1316 |
RESERVED(); |
1317 |
break;
|
1318 |
} |
1319 |
break;
|
1320 |
|
1321 |
case 7: /*reserved*/ |
1322 |
RESERVED(); |
1323 |
break;
|
1324 |
|
1325 |
case 8: /*ADD*/ |
1326 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1327 |
tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1328 |
break;
|
1329 |
|
1330 |
case 9: /*ADD**/ |
1331 |
case 10: |
1332 |
case 11: |
1333 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1334 |
{ |
1335 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1336 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
|
1337 |
tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); |
1338 |
tcg_temp_free(tmp); |
1339 |
} |
1340 |
break;
|
1341 |
|
1342 |
case 12: /*SUB*/ |
1343 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1344 |
tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1345 |
break;
|
1346 |
|
1347 |
case 13: /*SUB**/ |
1348 |
case 14: |
1349 |
case 15: |
1350 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1351 |
{ |
1352 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1353 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
|
1354 |
tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); |
1355 |
tcg_temp_free(tmp); |
1356 |
} |
1357 |
break;
|
1358 |
} |
1359 |
break;
|
1360 |
|
1361 |
case 1: /*RST1*/ |
1362 |
switch (OP2) {
|
1363 |
case 0: /*SLLI*/ |
1364 |
case 1: |
1365 |
gen_window_check2(dc, RRR_R, RRR_S); |
1366 |
tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S], |
1367 |
32 - (RRR_T | ((OP2 & 1) << 4))); |
1368 |
break;
|
1369 |
|
1370 |
case 2: /*SRAI*/ |
1371 |
case 3: |
1372 |
gen_window_check2(dc, RRR_R, RRR_T); |
1373 |
tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T], |
1374 |
RRR_S | ((OP2 & 1) << 4)); |
1375 |
break;
|
1376 |
|
1377 |
case 4: /*SRLI*/ |
1378 |
gen_window_check2(dc, RRR_R, RRR_T); |
1379 |
tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S); |
1380 |
break;
|
1381 |
|
1382 |
case 6: /*XSR*/ |
1383 |
{ |
1384 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1385 |
if (RSR_SR >= 64) { |
1386 |
gen_check_privilege(dc); |
1387 |
} |
1388 |
gen_window_check1(dc, RRR_T); |
1389 |
tcg_gen_mov_i32(tmp, cpu_R[RRR_T]); |
1390 |
gen_rsr(dc, cpu_R[RRR_T], RSR_SR); |
1391 |
gen_wsr(dc, RSR_SR, tmp); |
1392 |
tcg_temp_free(tmp); |
1393 |
if (!sregnames[RSR_SR]) {
|
1394 |
TBD(); |
1395 |
} |
1396 |
} |
1397 |
break;
|
1398 |
|
1399 |
/*
|
1400 |
* Note: 64 bit ops are used here solely because SAR values
|
1401 |
* have range 0..63
|
1402 |
*/
|
1403 |
#define gen_shift_reg(cmd, reg) do { \ |
1404 |
TCGv_i64 tmp = tcg_temp_new_i64(); \ |
1405 |
tcg_gen_extu_i32_i64(tmp, reg); \ |
1406 |
tcg_gen_##cmd##_i64(v, v, tmp); \ |
1407 |
tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \ |
1408 |
tcg_temp_free_i64(v); \ |
1409 |
tcg_temp_free_i64(tmp); \ |
1410 |
} while (0) |
1411 |
|
1412 |
#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
|
1413 |
|
1414 |
case 8: /*SRC*/ |
1415 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1416 |
{ |
1417 |
TCGv_i64 v = tcg_temp_new_i64(); |
1418 |
tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]); |
1419 |
gen_shift(shr); |
1420 |
} |
1421 |
break;
|
1422 |
|
1423 |
case 9: /*SRL*/ |
1424 |
gen_window_check2(dc, RRR_R, RRR_T); |
1425 |
if (dc->sar_5bit) {
|
1426 |
tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); |
1427 |
} else {
|
1428 |
TCGv_i64 v = tcg_temp_new_i64(); |
1429 |
tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]); |
1430 |
gen_shift(shr); |
1431 |
} |
1432 |
break;
|
1433 |
|
1434 |
case 10: /*SLL*/ |
1435 |
gen_window_check2(dc, RRR_R, RRR_S); |
1436 |
if (dc->sar_m32_5bit) {
|
1437 |
tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32); |
1438 |
} else {
|
1439 |
TCGv_i64 v = tcg_temp_new_i64(); |
1440 |
TCGv_i32 s = tcg_const_i32(32);
|
1441 |
tcg_gen_sub_i32(s, s, cpu_SR[SAR]); |
1442 |
tcg_gen_andi_i32(s, s, 0x3f);
|
1443 |
tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]); |
1444 |
gen_shift_reg(shl, s); |
1445 |
tcg_temp_free(s); |
1446 |
} |
1447 |
break;
|
1448 |
|
1449 |
case 11: /*SRA*/ |
1450 |
gen_window_check2(dc, RRR_R, RRR_T); |
1451 |
if (dc->sar_5bit) {
|
1452 |
tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); |
1453 |
} else {
|
1454 |
TCGv_i64 v = tcg_temp_new_i64(); |
1455 |
tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]); |
1456 |
gen_shift(sar); |
1457 |
} |
1458 |
break;
|
1459 |
#undef gen_shift
|
1460 |
#undef gen_shift_reg
|
1461 |
|
1462 |
case 12: /*MUL16U*/ |
1463 |
HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); |
1464 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1465 |
{ |
1466 |
TCGv_i32 v1 = tcg_temp_new_i32(); |
1467 |
TCGv_i32 v2 = tcg_temp_new_i32(); |
1468 |
tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]); |
1469 |
tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]); |
1470 |
tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); |
1471 |
tcg_temp_free(v2); |
1472 |
tcg_temp_free(v1); |
1473 |
} |
1474 |
break;
|
1475 |
|
1476 |
case 13: /*MUL16S*/ |
1477 |
HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); |
1478 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1479 |
{ |
1480 |
TCGv_i32 v1 = tcg_temp_new_i32(); |
1481 |
TCGv_i32 v2 = tcg_temp_new_i32(); |
1482 |
tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]); |
1483 |
tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]); |
1484 |
tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); |
1485 |
tcg_temp_free(v2); |
1486 |
tcg_temp_free(v1); |
1487 |
} |
1488 |
break;
|
1489 |
|
1490 |
default: /*reserved*/ |
1491 |
RESERVED(); |
1492 |
break;
|
1493 |
} |
1494 |
break;
|
1495 |
|
1496 |
case 2: /*RST2*/ |
1497 |
if (OP2 >= 8) { |
1498 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1499 |
} |
1500 |
|
1501 |
if (OP2 >= 12) { |
1502 |
HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV); |
1503 |
int label = gen_new_label();
|
1504 |
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
|
1505 |
gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE); |
1506 |
gen_set_label(label); |
1507 |
} |
1508 |
|
1509 |
switch (OP2) {
|
1510 |
#define BOOLEAN_LOGIC(fn, r, s, t) \
|
1511 |
do { \
|
1512 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); \ |
1513 |
TCGv_i32 tmp1 = tcg_temp_new_i32(); \ |
1514 |
TCGv_i32 tmp2 = tcg_temp_new_i32(); \ |
1515 |
\ |
1516 |
tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \ |
1517 |
tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \ |
1518 |
tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \ |
1519 |
tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
|
1520 |
tcg_temp_free(tmp1); \ |
1521 |
tcg_temp_free(tmp2); \ |
1522 |
} while (0) |
1523 |
|
1524 |
case 0: /*ANDBp*/ |
1525 |
BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T); |
1526 |
break;
|
1527 |
|
1528 |
case 1: /*ANDBCp*/ |
1529 |
BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T); |
1530 |
break;
|
1531 |
|
1532 |
case 2: /*ORBp*/ |
1533 |
BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T); |
1534 |
break;
|
1535 |
|
1536 |
case 3: /*ORBCp*/ |
1537 |
BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T); |
1538 |
break;
|
1539 |
|
1540 |
case 4: /*XORBp*/ |
1541 |
BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T); |
1542 |
break;
|
1543 |
|
1544 |
#undef BOOLEAN_LOGIC
|
1545 |
|
1546 |
case 8: /*MULLi*/ |
1547 |
HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL); |
1548 |
tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1549 |
break;
|
1550 |
|
1551 |
case 10: /*MULUHi*/ |
1552 |
case 11: /*MULSHi*/ |
1553 |
HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH); |
1554 |
{ |
1555 |
TCGv_i64 r = tcg_temp_new_i64(); |
1556 |
TCGv_i64 s = tcg_temp_new_i64(); |
1557 |
TCGv_i64 t = tcg_temp_new_i64(); |
1558 |
|
1559 |
if (OP2 == 10) { |
1560 |
tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]); |
1561 |
tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]); |
1562 |
} else {
|
1563 |
tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]); |
1564 |
tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]); |
1565 |
} |
1566 |
tcg_gen_mul_i64(r, s, t); |
1567 |
tcg_gen_shri_i64(r, r, 32);
|
1568 |
tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r); |
1569 |
|
1570 |
tcg_temp_free_i64(r); |
1571 |
tcg_temp_free_i64(s); |
1572 |
tcg_temp_free_i64(t); |
1573 |
} |
1574 |
break;
|
1575 |
|
1576 |
case 12: /*QUOUi*/ |
1577 |
tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1578 |
break;
|
1579 |
|
1580 |
case 13: /*QUOSi*/ |
1581 |
case 15: /*REMSi*/ |
1582 |
{ |
1583 |
int label1 = gen_new_label();
|
1584 |
int label2 = gen_new_label();
|
1585 |
|
1586 |
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
|
1587 |
label1); |
1588 |
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
|
1589 |
label1); |
1590 |
tcg_gen_movi_i32(cpu_R[RRR_R], |
1591 |
OP2 == 13 ? 0x80000000 : 0); |
1592 |
tcg_gen_br(label2); |
1593 |
gen_set_label(label1); |
1594 |
if (OP2 == 13) { |
1595 |
tcg_gen_div_i32(cpu_R[RRR_R], |
1596 |
cpu_R[RRR_S], cpu_R[RRR_T]); |
1597 |
} else {
|
1598 |
tcg_gen_rem_i32(cpu_R[RRR_R], |
1599 |
cpu_R[RRR_S], cpu_R[RRR_T]); |
1600 |
} |
1601 |
gen_set_label(label2); |
1602 |
} |
1603 |
break;
|
1604 |
|
1605 |
case 14: /*REMUi*/ |
1606 |
tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); |
1607 |
break;
|
1608 |
|
1609 |
default: /*reserved*/ |
1610 |
RESERVED(); |
1611 |
break;
|
1612 |
} |
1613 |
break;
|
1614 |
|
1615 |
case 3: /*RST3*/ |
1616 |
switch (OP2) {
|
1617 |
case 0: /*RSR*/ |
1618 |
if (RSR_SR >= 64) { |
1619 |
gen_check_privilege(dc); |
1620 |
} |
1621 |
gen_window_check1(dc, RRR_T); |
1622 |
gen_rsr(dc, cpu_R[RRR_T], RSR_SR); |
1623 |
if (!sregnames[RSR_SR]) {
|
1624 |
TBD(); |
1625 |
} |
1626 |
break;
|
1627 |
|
1628 |
case 1: /*WSR*/ |
1629 |
if (RSR_SR >= 64) { |
1630 |
gen_check_privilege(dc); |
1631 |
} |
1632 |
gen_window_check1(dc, RRR_T); |
1633 |
gen_wsr(dc, RSR_SR, cpu_R[RRR_T]); |
1634 |
if (!sregnames[RSR_SR]) {
|
1635 |
TBD(); |
1636 |
} |
1637 |
break;
|
1638 |
|
1639 |
case 2: /*SEXTu*/ |
1640 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT); |
1641 |
gen_window_check2(dc, RRR_R, RRR_S); |
1642 |
{ |
1643 |
int shift = 24 - RRR_T; |
1644 |
|
1645 |
if (shift == 24) { |
1646 |
tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1647 |
} else if (shift == 16) { |
1648 |
tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1649 |
} else {
|
1650 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1651 |
tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift); |
1652 |
tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift); |
1653 |
tcg_temp_free(tmp); |
1654 |
} |
1655 |
} |
1656 |
break;
|
1657 |
|
1658 |
case 3: /*CLAMPSu*/ |
1659 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS); |
1660 |
gen_window_check2(dc, RRR_R, RRR_S); |
1661 |
{ |
1662 |
TCGv_i32 tmp1 = tcg_temp_new_i32(); |
1663 |
TCGv_i32 tmp2 = tcg_temp_new_i32(); |
1664 |
int label = gen_new_label();
|
1665 |
|
1666 |
tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
|
1667 |
tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]); |
1668 |
tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7)); |
1669 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1670 |
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label);
|
1671 |
|
1672 |
tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
|
1673 |
tcg_gen_xori_i32(cpu_R[RRR_R], tmp1, |
1674 |
0xffffffff >> (25 - RRR_T)); |
1675 |
|
1676 |
gen_set_label(label); |
1677 |
|
1678 |
tcg_temp_free(tmp1); |
1679 |
tcg_temp_free(tmp2); |
1680 |
} |
1681 |
break;
|
1682 |
|
1683 |
case 4: /*MINu*/ |
1684 |
case 5: /*MAXu*/ |
1685 |
case 6: /*MINUu*/ |
1686 |
case 7: /*MAXUu*/ |
1687 |
HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX); |
1688 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1689 |
{ |
1690 |
static const TCGCond cond[] = { |
1691 |
TCG_COND_LE, |
1692 |
TCG_COND_GE, |
1693 |
TCG_COND_LEU, |
1694 |
TCG_COND_GEU |
1695 |
}; |
1696 |
int label = gen_new_label();
|
1697 |
|
1698 |
if (RRR_R != RRR_T) {
|
1699 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1700 |
tcg_gen_brcond_i32(cond[OP2 - 4],
|
1701 |
cpu_R[RRR_S], cpu_R[RRR_T], label); |
1702 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]); |
1703 |
} else {
|
1704 |
tcg_gen_brcond_i32(cond[OP2 - 4],
|
1705 |
cpu_R[RRR_T], cpu_R[RRR_S], label); |
1706 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1707 |
} |
1708 |
gen_set_label(label); |
1709 |
} |
1710 |
break;
|
1711 |
|
1712 |
case 8: /*MOVEQZ*/ |
1713 |
case 9: /*MOVNEZ*/ |
1714 |
case 10: /*MOVLTZ*/ |
1715 |
case 11: /*MOVGEZ*/ |
1716 |
gen_window_check3(dc, RRR_R, RRR_S, RRR_T); |
1717 |
{ |
1718 |
static const TCGCond cond[] = { |
1719 |
TCG_COND_NE, |
1720 |
TCG_COND_EQ, |
1721 |
TCG_COND_GE, |
1722 |
TCG_COND_LT |
1723 |
}; |
1724 |
int label = gen_new_label();
|
1725 |
tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label); |
1726 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1727 |
gen_set_label(label); |
1728 |
} |
1729 |
break;
|
1730 |
|
1731 |
case 12: /*MOVFp*/ |
1732 |
case 13: /*MOVTp*/ |
1733 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
1734 |
gen_window_check2(dc, RRR_R, RRR_S); |
1735 |
{ |
1736 |
int label = gen_new_label();
|
1737 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1738 |
|
1739 |
tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
|
1740 |
tcg_gen_brcondi_i32( |
1741 |
OP2 & 1 ? TCG_COND_EQ : TCG_COND_NE,
|
1742 |
tmp, 0, label);
|
1743 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); |
1744 |
gen_set_label(label); |
1745 |
tcg_temp_free(tmp); |
1746 |
} |
1747 |
break;
|
1748 |
|
1749 |
case 14: /*RUR*/ |
1750 |
gen_window_check1(dc, RRR_R); |
1751 |
{ |
1752 |
int st = (RRR_S << 4) + RRR_T; |
1753 |
if (uregnames[st]) {
|
1754 |
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]); |
1755 |
} else {
|
1756 |
qemu_log("RUR %d not implemented, ", st);
|
1757 |
TBD(); |
1758 |
} |
1759 |
} |
1760 |
break;
|
1761 |
|
1762 |
case 15: /*WUR*/ |
1763 |
gen_window_check1(dc, RRR_T); |
1764 |
{ |
1765 |
if (uregnames[RSR_SR]) {
|
1766 |
tcg_gen_mov_i32(cpu_UR[RSR_SR], cpu_R[RRR_T]); |
1767 |
} else {
|
1768 |
qemu_log("WUR %d not implemented, ", RSR_SR);
|
1769 |
TBD(); |
1770 |
} |
1771 |
} |
1772 |
break;
|
1773 |
|
1774 |
} |
1775 |
break;
|
1776 |
|
1777 |
case 4: /*EXTUI*/ |
1778 |
case 5: |
1779 |
gen_window_check2(dc, RRR_R, RRR_T); |
1780 |
{ |
1781 |
int shiftimm = RRR_S | ((OP1 & 1) << 4); |
1782 |
int maskimm = (1 << (OP2 + 1)) - 1; |
1783 |
|
1784 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
1785 |
|
1786 |
if (shiftimm) {
|
1787 |
tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm); |
1788 |
} else {
|
1789 |
tcg_gen_mov_i32(tmp, cpu_R[RRR_T]); |
1790 |
} |
1791 |
|
1792 |
switch (maskimm) {
|
1793 |
case 0xff: |
1794 |
tcg_gen_ext8u_i32(cpu_R[RRR_R], tmp); |
1795 |
break;
|
1796 |
|
1797 |
case 0xffff: |
1798 |
tcg_gen_ext16u_i32(cpu_R[RRR_R], tmp); |
1799 |
break;
|
1800 |
|
1801 |
default:
|
1802 |
tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm); |
1803 |
break;
|
1804 |
} |
1805 |
tcg_temp_free(tmp); |
1806 |
} |
1807 |
break;
|
1808 |
|
1809 |
case 6: /*CUST0*/ |
1810 |
RESERVED(); |
1811 |
break;
|
1812 |
|
1813 |
case 7: /*CUST1*/ |
1814 |
RESERVED(); |
1815 |
break;
|
1816 |
|
1817 |
case 8: /*LSCXp*/ |
1818 |
HAS_OPTION(XTENSA_OPTION_COPROCESSOR); |
1819 |
TBD(); |
1820 |
break;
|
1821 |
|
1822 |
case 9: /*LSC4*/ |
1823 |
gen_window_check2(dc, RRR_S, RRR_T); |
1824 |
switch (OP2) {
|
1825 |
case 0: /*L32E*/ |
1826 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1827 |
gen_check_privilege(dc); |
1828 |
{ |
1829 |
TCGv_i32 addr = tcg_temp_new_i32(); |
1830 |
tcg_gen_addi_i32(addr, cpu_R[RRR_S], |
1831 |
(0xffffffc0 | (RRR_R << 2))); |
1832 |
tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring); |
1833 |
tcg_temp_free(addr); |
1834 |
} |
1835 |
break;
|
1836 |
|
1837 |
case 4: /*S32E*/ |
1838 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
1839 |
gen_check_privilege(dc); |
1840 |
{ |
1841 |
TCGv_i32 addr = tcg_temp_new_i32(); |
1842 |
tcg_gen_addi_i32(addr, cpu_R[RRR_S], |
1843 |
(0xffffffc0 | (RRR_R << 2))); |
1844 |
tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring); |
1845 |
tcg_temp_free(addr); |
1846 |
} |
1847 |
break;
|
1848 |
|
1849 |
default:
|
1850 |
RESERVED(); |
1851 |
break;
|
1852 |
} |
1853 |
break;
|
1854 |
|
1855 |
case 10: /*FP0*/ |
1856 |
HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); |
1857 |
TBD(); |
1858 |
break;
|
1859 |
|
1860 |
case 11: /*FP1*/ |
1861 |
HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); |
1862 |
TBD(); |
1863 |
break;
|
1864 |
|
1865 |
default: /*reserved*/ |
1866 |
RESERVED(); |
1867 |
break;
|
1868 |
} |
1869 |
break;
|
1870 |
|
1871 |
case 1: /*L32R*/ |
1872 |
gen_window_check1(dc, RRR_T); |
1873 |
{ |
1874 |
TCGv_i32 tmp = tcg_const_i32( |
1875 |
((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ? |
1876 |
0 : ((dc->pc + 3) & ~3)) + |
1877 |
(0xfffc0000 | (RI16_IMM16 << 2))); |
1878 |
|
1879 |
if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
|
1880 |
tcg_gen_add_i32(tmp, tmp, dc->litbase); |
1881 |
} |
1882 |
tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring); |
1883 |
tcg_temp_free(tmp); |
1884 |
} |
1885 |
break;
|
1886 |
|
1887 |
case 2: /*LSAI*/ |
1888 |
#define gen_load_store(type, shift) do { \ |
1889 |
TCGv_i32 addr = tcg_temp_new_i32(); \ |
1890 |
gen_window_check2(dc, RRI8_S, RRI8_T); \ |
1891 |
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \ |
1892 |
if (shift) { \
|
1893 |
gen_load_store_alignment(dc, shift, addr, false); \
|
1894 |
} \ |
1895 |
tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ |
1896 |
tcg_temp_free(addr); \ |
1897 |
} while (0) |
1898 |
|
1899 |
switch (RRI8_R) {
|
1900 |
case 0: /*L8UI*/ |
1901 |
gen_load_store(ld8u, 0);
|
1902 |
break;
|
1903 |
|
1904 |
case 1: /*L16UI*/ |
1905 |
gen_load_store(ld16u, 1);
|
1906 |
break;
|
1907 |
|
1908 |
case 2: /*L32I*/ |
1909 |
gen_load_store(ld32u, 2);
|
1910 |
break;
|
1911 |
|
1912 |
case 4: /*S8I*/ |
1913 |
gen_load_store(st8, 0);
|
1914 |
break;
|
1915 |
|
1916 |
case 5: /*S16I*/ |
1917 |
gen_load_store(st16, 1);
|
1918 |
break;
|
1919 |
|
1920 |
case 6: /*S32I*/ |
1921 |
gen_load_store(st32, 2);
|
1922 |
break;
|
1923 |
|
1924 |
case 7: /*CACHEc*/ |
1925 |
if (RRI8_T < 8) { |
1926 |
HAS_OPTION(XTENSA_OPTION_DCACHE); |
1927 |
} |
1928 |
|
1929 |
switch (RRI8_T) {
|
1930 |
case 0: /*DPFRc*/ |
1931 |
break;
|
1932 |
|
1933 |
case 1: /*DPFWc*/ |
1934 |
break;
|
1935 |
|
1936 |
case 2: /*DPFROc*/ |
1937 |
break;
|
1938 |
|
1939 |
case 3: /*DPFWOc*/ |
1940 |
break;
|
1941 |
|
1942 |
case 4: /*DHWBc*/ |
1943 |
break;
|
1944 |
|
1945 |
case 5: /*DHWBIc*/ |
1946 |
break;
|
1947 |
|
1948 |
case 6: /*DHIc*/ |
1949 |
break;
|
1950 |
|
1951 |
case 7: /*DIIc*/ |
1952 |
break;
|
1953 |
|
1954 |
case 8: /*DCEc*/ |
1955 |
switch (OP1) {
|
1956 |
case 0: /*DPFLl*/ |
1957 |
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); |
1958 |
break;
|
1959 |
|
1960 |
case 2: /*DHUl*/ |
1961 |
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); |
1962 |
break;
|
1963 |
|
1964 |
case 3: /*DIUl*/ |
1965 |
HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); |
1966 |
break;
|
1967 |
|
1968 |
case 4: /*DIWBc*/ |
1969 |
HAS_OPTION(XTENSA_OPTION_DCACHE); |
1970 |
break;
|
1971 |
|
1972 |
case 5: /*DIWBIc*/ |
1973 |
HAS_OPTION(XTENSA_OPTION_DCACHE); |
1974 |
break;
|
1975 |
|
1976 |
default: /*reserved*/ |
1977 |
RESERVED(); |
1978 |
break;
|
1979 |
|
1980 |
} |
1981 |
break;
|
1982 |
|
1983 |
case 12: /*IPFc*/ |
1984 |
HAS_OPTION(XTENSA_OPTION_ICACHE); |
1985 |
break;
|
1986 |
|
1987 |
case 13: /*ICEc*/ |
1988 |
switch (OP1) {
|
1989 |
case 0: /*IPFLl*/ |
1990 |
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); |
1991 |
break;
|
1992 |
|
1993 |
case 2: /*IHUl*/ |
1994 |
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); |
1995 |
break;
|
1996 |
|
1997 |
case 3: /*IIUl*/ |
1998 |
HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); |
1999 |
break;
|
2000 |
|
2001 |
default: /*reserved*/ |
2002 |
RESERVED(); |
2003 |
break;
|
2004 |
} |
2005 |
break;
|
2006 |
|
2007 |
case 14: /*IHIc*/ |
2008 |
HAS_OPTION(XTENSA_OPTION_ICACHE); |
2009 |
break;
|
2010 |
|
2011 |
case 15: /*IIIc*/ |
2012 |
HAS_OPTION(XTENSA_OPTION_ICACHE); |
2013 |
break;
|
2014 |
|
2015 |
default: /*reserved*/ |
2016 |
RESERVED(); |
2017 |
break;
|
2018 |
} |
2019 |
break;
|
2020 |
|
2021 |
case 9: /*L16SI*/ |
2022 |
gen_load_store(ld16s, 1);
|
2023 |
break;
|
2024 |
#undef gen_load_store
|
2025 |
|
2026 |
case 10: /*MOVI*/ |
2027 |
gen_window_check1(dc, RRI8_T); |
2028 |
tcg_gen_movi_i32(cpu_R[RRI8_T], |
2029 |
RRI8_IMM8 | (RRI8_S << 8) |
|
2030 |
((RRI8_S & 0x8) ? 0xfffff000 : 0)); |
2031 |
break;
|
2032 |
|
2033 |
#define gen_load_store_no_hw_align(type) do { \ |
2034 |
TCGv_i32 addr = tcg_temp_local_new_i32(); \ |
2035 |
gen_window_check2(dc, RRI8_S, RRI8_T); \ |
2036 |
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
|
2037 |
gen_load_store_alignment(dc, 2, addr, true); \ |
2038 |
tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ |
2039 |
tcg_temp_free(addr); \ |
2040 |
} while (0) |
2041 |
|
2042 |
case 11: /*L32AIy*/ |
2043 |
HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); |
2044 |
gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
|
2045 |
break;
|
2046 |
|
2047 |
case 12: /*ADDI*/ |
2048 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2049 |
tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE); |
2050 |
break;
|
2051 |
|
2052 |
case 13: /*ADDMI*/ |
2053 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2054 |
tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
|
2055 |
break;
|
2056 |
|
2057 |
case 14: /*S32C1Iy*/ |
2058 |
HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE); |
2059 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2060 |
{ |
2061 |
int label = gen_new_label();
|
2062 |
TCGv_i32 tmp = tcg_temp_local_new_i32(); |
2063 |
TCGv_i32 addr = tcg_temp_local_new_i32(); |
2064 |
|
2065 |
tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]); |
2066 |
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
|
2067 |
gen_load_store_alignment(dc, 2, addr, true); |
2068 |
tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring); |
2069 |
tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T], |
2070 |
cpu_SR[SCOMPARE1], label); |
2071 |
|
2072 |
tcg_gen_qemu_st32(tmp, addr, dc->cring); |
2073 |
|
2074 |
gen_set_label(label); |
2075 |
tcg_temp_free(addr); |
2076 |
tcg_temp_free(tmp); |
2077 |
} |
2078 |
break;
|
2079 |
|
2080 |
case 15: /*S32RIy*/ |
2081 |
HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); |
2082 |
gen_load_store_no_hw_align(st32); /*TODO release?*/
|
2083 |
break;
|
2084 |
#undef gen_load_store_no_hw_align
|
2085 |
|
2086 |
default: /*reserved*/ |
2087 |
RESERVED(); |
2088 |
break;
|
2089 |
} |
2090 |
break;
|
2091 |
|
2092 |
case 3: /*LSCIp*/ |
2093 |
HAS_OPTION(XTENSA_OPTION_COPROCESSOR); |
2094 |
TBD(); |
2095 |
break;
|
2096 |
|
2097 |
case 4: /*MAC16d*/ |
2098 |
HAS_OPTION(XTENSA_OPTION_MAC16); |
2099 |
{ |
2100 |
enum {
|
2101 |
MAC16_UMUL = 0x0,
|
2102 |
MAC16_MUL = 0x4,
|
2103 |
MAC16_MULA = 0x8,
|
2104 |
MAC16_MULS = 0xc,
|
2105 |
MAC16_NONE = 0xf,
|
2106 |
} op = OP1 & 0xc;
|
2107 |
bool is_m1_sr = (OP2 & 0x3) == 2; |
2108 |
bool is_m2_sr = (OP2 & 0xc) == 0; |
2109 |
uint32_t ld_offset = 0;
|
2110 |
|
2111 |
if (OP2 > 9) { |
2112 |
RESERVED(); |
2113 |
} |
2114 |
|
2115 |
switch (OP2 & 2) { |
2116 |
case 0: /*MACI?/MACC?*/ |
2117 |
is_m1_sr = true;
|
2118 |
ld_offset = (OP2 & 1) ? -4 : 4; |
2119 |
|
2120 |
if (OP2 >= 8) { /*MACI/MACC*/ |
2121 |
if (OP1 == 0) { /*LDINC/LDDEC*/ |
2122 |
op = MAC16_NONE; |
2123 |
} else {
|
2124 |
RESERVED(); |
2125 |
} |
2126 |
} else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/ |
2127 |
RESERVED(); |
2128 |
} |
2129 |
break;
|
2130 |
|
2131 |
case 2: /*MACD?/MACA?*/ |
2132 |
if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/ |
2133 |
RESERVED(); |
2134 |
} |
2135 |
break;
|
2136 |
} |
2137 |
|
2138 |
if (op != MAC16_NONE) {
|
2139 |
if (!is_m1_sr) {
|
2140 |
gen_window_check1(dc, RRR_S); |
2141 |
} |
2142 |
if (!is_m2_sr) {
|
2143 |
gen_window_check1(dc, RRR_T); |
2144 |
} |
2145 |
} |
2146 |
|
2147 |
{ |
2148 |
TCGv_i32 vaddr = tcg_temp_new_i32(); |
2149 |
TCGv_i32 mem32 = tcg_temp_new_i32(); |
2150 |
|
2151 |
if (ld_offset) {
|
2152 |
gen_window_check1(dc, RRR_S); |
2153 |
tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset); |
2154 |
gen_load_store_alignment(dc, 2, vaddr, false); |
2155 |
tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring); |
2156 |
} |
2157 |
if (op != MAC16_NONE) {
|
2158 |
TCGv_i32 m1 = gen_mac16_m( |
2159 |
is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S], |
2160 |
OP1 & 1, op == MAC16_UMUL);
|
2161 |
TCGv_i32 m2 = gen_mac16_m( |
2162 |
is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
|
2163 |
OP1 & 2, op == MAC16_UMUL);
|
2164 |
|
2165 |
if (op == MAC16_MUL || op == MAC16_UMUL) {
|
2166 |
tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2); |
2167 |
if (op == MAC16_UMUL) {
|
2168 |
tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
|
2169 |
} else {
|
2170 |
tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
|
2171 |
} |
2172 |
} else {
|
2173 |
TCGv_i32 res = tcg_temp_new_i32(); |
2174 |
TCGv_i64 res64 = tcg_temp_new_i64(); |
2175 |
TCGv_i64 tmp = tcg_temp_new_i64(); |
2176 |
|
2177 |
tcg_gen_mul_i32(res, m1, m2); |
2178 |
tcg_gen_ext_i32_i64(res64, res); |
2179 |
tcg_gen_concat_i32_i64(tmp, |
2180 |
cpu_SR[ACCLO], cpu_SR[ACCHI]); |
2181 |
if (op == MAC16_MULA) {
|
2182 |
tcg_gen_add_i64(tmp, tmp, res64); |
2183 |
} else {
|
2184 |
tcg_gen_sub_i64(tmp, tmp, res64); |
2185 |
} |
2186 |
tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp); |
2187 |
tcg_gen_shri_i64(tmp, tmp, 32);
|
2188 |
tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp); |
2189 |
tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]); |
2190 |
|
2191 |
tcg_temp_free(res); |
2192 |
tcg_temp_free_i64(res64); |
2193 |
tcg_temp_free_i64(tmp); |
2194 |
} |
2195 |
tcg_temp_free(m1); |
2196 |
tcg_temp_free(m2); |
2197 |
} |
2198 |
if (ld_offset) {
|
2199 |
tcg_gen_mov_i32(cpu_R[RRR_S], vaddr); |
2200 |
tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32); |
2201 |
} |
2202 |
tcg_temp_free(vaddr); |
2203 |
tcg_temp_free(mem32); |
2204 |
} |
2205 |
} |
2206 |
break;
|
2207 |
|
2208 |
case 5: /*CALLN*/ |
2209 |
switch (CALL_N) {
|
2210 |
case 0: /*CALL0*/ |
2211 |
tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
|
2212 |
gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); |
2213 |
break;
|
2214 |
|
2215 |
case 1: /*CALL4w*/ |
2216 |
case 2: /*CALL8w*/ |
2217 |
case 3: /*CALL12w*/ |
2218 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
2219 |
gen_window_check1(dc, CALL_N << 2);
|
2220 |
gen_callwi(dc, CALL_N, |
2221 |
(dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); |
2222 |
break;
|
2223 |
} |
2224 |
break;
|
2225 |
|
2226 |
case 6: /*SI*/ |
2227 |
switch (CALL_N) {
|
2228 |
case 0: /*J*/ |
2229 |
gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0); |
2230 |
break;
|
2231 |
|
2232 |
case 1: /*BZ*/ |
2233 |
gen_window_check1(dc, BRI12_S); |
2234 |
{ |
2235 |
static const TCGCond cond[] = { |
2236 |
TCG_COND_EQ, /*BEQZ*/
|
2237 |
TCG_COND_NE, /*BNEZ*/
|
2238 |
TCG_COND_LT, /*BLTZ*/
|
2239 |
TCG_COND_GE, /*BGEZ*/
|
2240 |
}; |
2241 |
|
2242 |
gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0, |
2243 |
4 + BRI12_IMM12_SE);
|
2244 |
} |
2245 |
break;
|
2246 |
|
2247 |
case 2: /*BI0*/ |
2248 |
gen_window_check1(dc, BRI8_S); |
2249 |
{ |
2250 |
static const TCGCond cond[] = { |
2251 |
TCG_COND_EQ, /*BEQI*/
|
2252 |
TCG_COND_NE, /*BNEI*/
|
2253 |
TCG_COND_LT, /*BLTI*/
|
2254 |
TCG_COND_GE, /*BGEI*/
|
2255 |
}; |
2256 |
|
2257 |
gen_brcondi(dc, cond[BRI8_M & 3],
|
2258 |
cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
|
2259 |
} |
2260 |
break;
|
2261 |
|
2262 |
case 3: /*BI1*/ |
2263 |
switch (BRI8_M) {
|
2264 |
case 0: /*ENTRYw*/ |
2265 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
2266 |
{ |
2267 |
TCGv_i32 pc = tcg_const_i32(dc->pc); |
2268 |
TCGv_i32 s = tcg_const_i32(BRI12_S); |
2269 |
TCGv_i32 imm = tcg_const_i32(BRI12_IMM12); |
2270 |
gen_advance_ccount(dc); |
2271 |
gen_helper_entry(cpu_env, pc, s, imm); |
2272 |
tcg_temp_free(imm); |
2273 |
tcg_temp_free(s); |
2274 |
tcg_temp_free(pc); |
2275 |
reset_used_window(dc); |
2276 |
} |
2277 |
break;
|
2278 |
|
2279 |
case 1: /*B1*/ |
2280 |
switch (BRI8_R) {
|
2281 |
case 0: /*BFp*/ |
2282 |
case 1: /*BTp*/ |
2283 |
HAS_OPTION(XTENSA_OPTION_BOOLEAN); |
2284 |
{ |
2285 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2286 |
tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
|
2287 |
gen_brcondi(dc, |
2288 |
BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
|
2289 |
tmp, 0, 4 + RRI8_IMM8_SE); |
2290 |
tcg_temp_free(tmp); |
2291 |
} |
2292 |
break;
|
2293 |
|
2294 |
case 8: /*LOOP*/ |
2295 |
case 9: /*LOOPNEZ*/ |
2296 |
case 10: /*LOOPGTZ*/ |
2297 |
HAS_OPTION(XTENSA_OPTION_LOOP); |
2298 |
gen_window_check1(dc, RRI8_S); |
2299 |
{ |
2300 |
uint32_t lend = dc->pc + RRI8_IMM8 + 4;
|
2301 |
TCGv_i32 tmp = tcg_const_i32(lend); |
2302 |
|
2303 |
tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
|
2304 |
tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc); |
2305 |
gen_helper_wsr_lend(cpu_env, tmp); |
2306 |
tcg_temp_free(tmp); |
2307 |
|
2308 |
if (BRI8_R > 8) { |
2309 |
int label = gen_new_label();
|
2310 |
tcg_gen_brcondi_i32( |
2311 |
BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
|
2312 |
cpu_R[RRI8_S], 0, label);
|
2313 |
gen_jumpi(dc, lend, 1);
|
2314 |
gen_set_label(label); |
2315 |
} |
2316 |
|
2317 |
gen_jumpi(dc, dc->next_pc, 0);
|
2318 |
} |
2319 |
break;
|
2320 |
|
2321 |
default: /*reserved*/ |
2322 |
RESERVED(); |
2323 |
break;
|
2324 |
|
2325 |
} |
2326 |
break;
|
2327 |
|
2328 |
case 2: /*BLTUI*/ |
2329 |
case 3: /*BGEUI*/ |
2330 |
gen_window_check1(dc, BRI8_S); |
2331 |
gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
|
2332 |
cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
|
2333 |
break;
|
2334 |
} |
2335 |
break;
|
2336 |
|
2337 |
} |
2338 |
break;
|
2339 |
|
2340 |
case 7: /*B*/ |
2341 |
{ |
2342 |
TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
|
2343 |
|
2344 |
switch (RRI8_R & 7) { |
2345 |
case 0: /*BNONE*/ /*BANY*/ |
2346 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2347 |
{ |
2348 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2349 |
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); |
2350 |
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); |
2351 |
tcg_temp_free(tmp); |
2352 |
} |
2353 |
break;
|
2354 |
|
2355 |
case 1: /*BEQ*/ /*BNE*/ |
2356 |
case 2: /*BLT*/ /*BGE*/ |
2357 |
case 3: /*BLTU*/ /*BGEU*/ |
2358 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2359 |
{ |
2360 |
static const TCGCond cond[] = { |
2361 |
[1] = TCG_COND_EQ,
|
2362 |
[2] = TCG_COND_LT,
|
2363 |
[3] = TCG_COND_LTU,
|
2364 |
[9] = TCG_COND_NE,
|
2365 |
[10] = TCG_COND_GE,
|
2366 |
[11] = TCG_COND_GEU,
|
2367 |
}; |
2368 |
gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T], |
2369 |
4 + RRI8_IMM8_SE);
|
2370 |
} |
2371 |
break;
|
2372 |
|
2373 |
case 4: /*BALL*/ /*BNALL*/ |
2374 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2375 |
{ |
2376 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2377 |
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); |
2378 |
gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T], |
2379 |
4 + RRI8_IMM8_SE);
|
2380 |
tcg_temp_free(tmp); |
2381 |
} |
2382 |
break;
|
2383 |
|
2384 |
case 5: /*BBC*/ /*BBS*/ |
2385 |
gen_window_check2(dc, RRI8_S, RRI8_T); |
2386 |
{ |
2387 |
#ifdef TARGET_WORDS_BIGENDIAN
|
2388 |
TCGv_i32 bit = tcg_const_i32(0x80000000);
|
2389 |
#else
|
2390 |
TCGv_i32 bit = tcg_const_i32(0x00000001);
|
2391 |
#endif
|
2392 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2393 |
tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
|
2394 |
#ifdef TARGET_WORDS_BIGENDIAN
|
2395 |
tcg_gen_shr_i32(bit, bit, tmp); |
2396 |
#else
|
2397 |
tcg_gen_shl_i32(bit, bit, tmp); |
2398 |
#endif
|
2399 |
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit); |
2400 |
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); |
2401 |
tcg_temp_free(tmp); |
2402 |
tcg_temp_free(bit); |
2403 |
} |
2404 |
break;
|
2405 |
|
2406 |
case 6: /*BBCI*/ /*BBSI*/ |
2407 |
case 7: |
2408 |
gen_window_check1(dc, RRI8_S); |
2409 |
{ |
2410 |
TCGv_i32 tmp = tcg_temp_new_i32(); |
2411 |
tcg_gen_andi_i32(tmp, cpu_R[RRI8_S], |
2412 |
#ifdef TARGET_WORDS_BIGENDIAN
|
2413 |
0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T)); |
2414 |
#else
|
2415 |
0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T)); |
2416 |
#endif
|
2417 |
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); |
2418 |
tcg_temp_free(tmp); |
2419 |
} |
2420 |
break;
|
2421 |
|
2422 |
} |
2423 |
} |
2424 |
break;
|
2425 |
|
2426 |
#define gen_narrow_load_store(type) do { \ |
2427 |
TCGv_i32 addr = tcg_temp_new_i32(); \ |
2428 |
gen_window_check2(dc, RRRN_S, RRRN_T); \ |
2429 |
tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
|
2430 |
gen_load_store_alignment(dc, 2, addr, false); \ |
2431 |
tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \ |
2432 |
tcg_temp_free(addr); \ |
2433 |
} while (0) |
2434 |
|
2435 |
case 8: /*L32I.Nn*/ |
2436 |
gen_narrow_load_store(ld32u); |
2437 |
break;
|
2438 |
|
2439 |
case 9: /*S32I.Nn*/ |
2440 |
gen_narrow_load_store(st32); |
2441 |
break;
|
2442 |
#undef gen_narrow_load_store
|
2443 |
|
2444 |
case 10: /*ADD.Nn*/ |
2445 |
gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T); |
2446 |
tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]); |
2447 |
break;
|
2448 |
|
2449 |
case 11: /*ADDI.Nn*/ |
2450 |
gen_window_check2(dc, RRRN_R, RRRN_S); |
2451 |
tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
|
2452 |
break;
|
2453 |
|
2454 |
case 12: /*ST2n*/ |
2455 |
gen_window_check1(dc, RRRN_S); |
2456 |
if (RRRN_T < 8) { /*MOVI.Nn*/ |
2457 |
tcg_gen_movi_i32(cpu_R[RRRN_S], |
2458 |
RRRN_R | (RRRN_T << 4) |
|
2459 |
((RRRN_T & 6) == 6 ? 0xffffff80 : 0)); |
2460 |
} else { /*BEQZ.Nn*/ /*BNEZ.Nn*/ |
2461 |
TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
|
2462 |
|
2463 |
gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
|
2464 |
4 + (RRRN_R | ((RRRN_T & 3) << 4))); |
2465 |
} |
2466 |
break;
|
2467 |
|
2468 |
case 13: /*ST3n*/ |
2469 |
switch (RRRN_R) {
|
2470 |
case 0: /*MOV.Nn*/ |
2471 |
gen_window_check2(dc, RRRN_S, RRRN_T); |
2472 |
tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]); |
2473 |
break;
|
2474 |
|
2475 |
case 15: /*S3*/ |
2476 |
switch (RRRN_T) {
|
2477 |
case 0: /*RET.Nn*/ |
2478 |
gen_jump(dc, cpu_R[0]);
|
2479 |
break;
|
2480 |
|
2481 |
case 1: /*RETW.Nn*/ |
2482 |
HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); |
2483 |
{ |
2484 |
TCGv_i32 tmp = tcg_const_i32(dc->pc); |
2485 |
gen_advance_ccount(dc); |
2486 |
gen_helper_retw(tmp, cpu_env, tmp); |
2487 |
gen_jump(dc, tmp); |
2488 |
tcg_temp_free(tmp); |
2489 |
} |
2490 |
break;
|
2491 |
|
2492 |
case 2: /*BREAK.Nn*/ |
2493 |
HAS_OPTION(XTENSA_OPTION_DEBUG); |
2494 |
if (dc->debug) {
|
2495 |
gen_debug_exception(dc, DEBUGCAUSE_BN); |
2496 |
} |
2497 |
break;
|
2498 |
|
2499 |
case 3: /*NOP.Nn*/ |
2500 |
break;
|
2501 |
|
2502 |
case 6: /*ILL.Nn*/ |
2503 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
2504 |
break;
|
2505 |
|
2506 |
default: /*reserved*/ |
2507 |
RESERVED(); |
2508 |
break;
|
2509 |
} |
2510 |
break;
|
2511 |
|
2512 |
default: /*reserved*/ |
2513 |
RESERVED(); |
2514 |
break;
|
2515 |
} |
2516 |
break;
|
2517 |
|
2518 |
default: /*reserved*/ |
2519 |
RESERVED(); |
2520 |
break;
|
2521 |
} |
2522 |
|
2523 |
gen_check_loop_end(dc, 0);
|
2524 |
dc->pc = dc->next_pc; |
2525 |
|
2526 |
return;
|
2527 |
|
2528 |
invalid_opcode:
|
2529 |
qemu_log("INVALID(pc = %08x)\n", dc->pc);
|
2530 |
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); |
2531 |
#undef HAS_OPTION
|
2532 |
} |
2533 |
|
2534 |
/*
 * Emit a debug exception if a guest breakpoint is registered at the
 * current translation PC, and stop the translation loop after it.
 */
static void check_breakpoint(CPUXtensaState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    /* Fast path: no breakpoints registered at all. */
    if (likely(QTAILQ_EMPTY(&env->breakpoints))) {
        return;
    }

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc != dc->pc) {
            continue;
        }
        /* Breakpoint hit: materialize the PC, raise EXCP_DEBUG and
         * force the translator to end the TB here. */
        tcg_gen_movi_i32(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_DEBUG);
        dc->is_jmp = DISAS_UPDATE;
    }
}
2548 |
|
2549 |
/*
 * Emit a DEBUGCAUSE_IB debug exception for the first enabled IBREAK
 * register whose address matches the current PC.
 */
static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
{
    unsigned idx;

    for (idx = 0; idx < dc->config->nibreak; ++idx) {
        /* Skip IBREAK registers that are not enabled. */
        if (!(env->sregs[IBREAKENABLE] & (1 << idx))) {
            continue;
        }
        if (env->sregs[IBREAKA + idx] == dc->pc) {
            gen_debug_exception(dc, DEBUGCAUSE_IB);
            break;
        }
    }
}
2561 |
|
2562 |
/*
 * Translate guest code starting at tb->pc into a stream of TCG ops.
 *
 * When search_pc is non-zero, also record per-op bookkeeping
 * (gen_opc_pc / gen_opc_instr_start / gen_opc_icount) so the guest PC
 * can later be recovered from an opcode position.
 */
static void gen_intermediate_code_internal(
        CPUXtensaState *env, TranslationBlock *tb, int search_pc)
{
    DisasContext dc;
    int insn_count = 0;
    int j, lj = -1;
    uint16_t *gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    int max_insns = tb->cflags & CF_COUNT_MASK;
    uint32_t pc_start = tb->pc;
    /* Translation never crosses a guest page boundary. */
    uint32_t next_page_start =
        (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    /* A zero count in cflags means "no limit". */
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    dc.config = env->config;
    dc.singlestep_enabled = env->singlestep_enabled;
    dc.tb = tb;
    dc.pc = pc_start;
    dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
    /* With EXCM set, memory accesses are made at ring 0. */
    dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
    dc.lbeg = env->sregs[LBEG];
    dc.lend = env->sregs[LEND];
    dc.is_jmp = DISAS_NEXT;
    dc.ccount_delta = 0;
    dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG;
    dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT;

    init_litbase(&dc);
    init_sar_tracker(&dc);
    reset_used_window(&dc);
    if (dc.icount) {
        /* Local temp: must survive across the brcond emitted below. */
        dc.next_icount = tcg_temp_local_new_i32();
    }

    gen_icount_start();

    /* Deliver a pending single-step debug exception before translating
     * anything else. */
    if (env->singlestep_enabled && env->exception_taken) {
        env->exception_taken = 0;
        tcg_gen_movi_i32(cpu_pc, dc.pc);
        gen_exception(&dc, EXCP_DEBUG);
    }

    do {
        check_breakpoint(env, &dc);

        if (search_pc) {
            /* Record the guest PC / insn count for the ops about to be
             * generated; zero-fill any gap since the last record. */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = insn_count;
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        ++dc.ccount_delta;

        /* For an I/O-terminated TB, bracket the final insn with
         * gen_io_start()/gen_io_end(). */
        if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc.icount) {
            int label = gen_new_label();

            /* next_icount = ICOUNT + 1; if it wrapped to zero, keep the
             * old ICOUNT value and raise the ICOUNT debug exception
             * (when debug is enabled). */
            tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1);
            tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label);
            tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]);
            if (dc.debug) {
                gen_debug_exception(&dc, DEBUGCAUSE_IC);
            }
            gen_set_label(label);
        }

        if (dc.debug) {
            gen_ibreak_check(env, &dc);
        }

        disas_xtensa_insn(&dc);
        ++insn_count;
        /* Commit the incremented ICOUNT only after the insn translated
         * without raising an exception. */
        if (dc.icount) {
            tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
        }
        if (env->singlestep_enabled) {
            tcg_gen_movi_i32(cpu_pc, dc.pc);
            gen_exception(&dc, EXCP_DEBUG);
            break;
        }
    } while (dc.is_jmp == DISAS_NEXT &&
            insn_count < max_insns &&
            dc.pc < next_page_start &&
            gen_opc_ptr < gen_opc_end);

    reset_litbase(&dc);
    reset_sar_tracker(&dc);
    if (dc.icount) {
        tcg_temp_free(dc.next_icount);
    }

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* TB ended without a branch: chain to the next sequential PC. */
    if (dc.is_jmp == DISAS_NEXT) {
        gen_jumpi(&dc, dc.pc, 0);
    }
    gen_icount_end(tb, insn_count);
    *gen_opc_ptr = INDEX_op_end;

    if (!search_pc) {
        tb->size = dc.pc - pc_start;
        tb->icount = insn_count;
    }
}
2684 |
|
2685 |
/* Translate a TB without recording PC-search bookkeeping. */
void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
2689 |
|
2690 |
/* Translate a TB, additionally recording per-op guest PC info so the
 * PC can be recovered from an opcode position (search_pc variant). */
void gen_intermediate_code_pc(CPUXtensaState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
2694 |
|
2695 |
/*
 * Dump CPU state to f: PC, all named special and user registers
 * (four per row), the 16 window registers A0..A15, and the full
 * physical AR register file.
 */
void cpu_dump_state(CPUXtensaState *env, FILE *f, fprintf_function cpu_fprintf,
        int flags)
{
    int idx;
    int printed;
    char sep;

    cpu_fprintf(f, "PC=%08x\n\n", env->pc);

    /* Named special registers. */
    printed = 0;
    for (idx = 0; idx < 256; ++idx) {
        if (!sregnames[idx]) {
            continue;
        }
        sep = (printed % 4) == 3 ? '\n' : ' ';
        cpu_fprintf(f, "%s=%08x%c", sregnames[idx], env->sregs[idx], sep);
        ++printed;
    }

    cpu_fprintf(f, (printed % 4) == 0 ? "\n" : "\n\n");

    /* Named user registers. */
    printed = 0;
    for (idx = 0; idx < 256; ++idx) {
        if (!uregnames[idx]) {
            continue;
        }
        sep = (printed % 4) == 3 ? '\n' : ' ';
        cpu_fprintf(f, "%s=%08x%c", uregnames[idx], env->uregs[idx], sep);
        ++printed;
    }

    cpu_fprintf(f, (printed % 4) == 0 ? "\n" : "\n\n");

    /* Current register window. */
    for (idx = 0; idx < 16; ++idx) {
        sep = (idx % 4) == 3 ? '\n' : ' ';
        cpu_fprintf(f, "A%02d=%08x%c", idx, env->regs[idx], sep);
    }

    cpu_fprintf(f, "\n");

    /* Whole physical register file. */
    for (idx = 0; idx < env->config->nareg; ++idx) {
        sep = (idx % 4) == 3 ? '\n' : ' ';
        cpu_fprintf(f, "AR%02d=%08x%c", idx, env->phys_regs[idx], sep);
    }
}
2732 |
|
2733 |
/* Restore env->pc from the guest PC recorded for opcode index pc_pos
 * (filled in by the search_pc translation pass). */
void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}