root / target-xtensa / op_helper.c @ 1dc324d2
History | View | Annotate | Download (21.3 kB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
|
3 |
* All rights reserved.
|
4 |
*
|
5 |
* Redistribution and use in source and binary forms, with or without
|
6 |
* modification, are permitted provided that the following conditions are met:
|
7 |
* * Redistributions of source code must retain the above copyright
|
8 |
* notice, this list of conditions and the following disclaimer.
|
9 |
* * Redistributions in binary form must reproduce the above copyright
|
10 |
* notice, this list of conditions and the following disclaimer in the
|
11 |
* documentation and/or other materials provided with the distribution.
|
12 |
* * Neither the name of the Open Source and Linux Lab nor the
|
13 |
* names of its contributors may be used to endorse or promote products
|
14 |
* derived from this software without specific prior written permission.
|
15 |
*
|
16 |
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
17 |
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
18 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
19 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
20 |
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
21 |
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
22 |
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
23 |
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24 |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
25 |
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26 |
*/
|
27 |
|
28 |
#include "cpu.h" |
29 |
#include "dyngen-exec.h" |
30 |
#include "helpers.h" |
31 |
#include "host-utils.h" |
32 |
|
33 |
static void do_unaligned_access(target_ulong addr, int is_write, int is_user, |
34 |
void *retaddr);
|
35 |
|
36 |
#define ALIGNED_ONLY
|
37 |
#define MMUSUFFIX _mmu
|
38 |
|
39 |
#define SHIFT 0 |
40 |
#include "softmmu_template.h" |
41 |
|
42 |
#define SHIFT 1 |
43 |
#include "softmmu_template.h" |
44 |
|
45 |
#define SHIFT 2 |
46 |
#include "softmmu_template.h" |
47 |
|
48 |
#define SHIFT 3 |
49 |
#include "softmmu_template.h" |
50 |
|
51 |
/*
 * Restore the guest CPU state from the TB that contains the host pc
 * of the helper's caller.  pc_ptr is a host return address inside
 * generated code; if no matching TB is found the state is left as is.
 */
static void do_restore_state(void *pc_ptr)
{
    TranslationBlock *tb;
    uint32_t pc = (uint32_t)(intptr_t)pc_ptr;

    tb = tb_find_pc(pc);
    if (tb) {
        cpu_restore_state(tb, env, pc);
    }
}

/*
 * Called by the softmmu templates on an unaligned access.  Raises a
 * LoadStoreAlignment exception only when the core configuration traps
 * on unaligned accesses (unaligned exception option enabled, hardware
 * alignment option disabled); otherwise the access proceeds.
 */
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
        void *retaddr)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
            !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        /* sync env->pc with the faulting instruction before raising */
        do_restore_state(retaddr);
        HELPER(exception_cause_vaddr)(
                env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}
72 |
|
73 |
/*
 * Softmmu TLB-miss handler: translate vaddr and install the mapping in
 * the QEMU TLB, or raise the MMU exception cause returned by the
 * translation.  env1 is the faulting CPU; the global env is saved and
 * restored around the body because the helpers called here operate on
 * the global env.
 */
void tlb_fill(CPUXtensaState *env1, target_ulong vaddr, int is_write, int mmu_idx,
        void *retaddr)
{
    CPUXtensaState *saved_env = env;

    env = env1;
    {
        uint32_t paddr;
        uint32_t page_size;
        unsigned access;
        int ret = xtensa_get_physical_addr(env, vaddr, is_write, mmu_idx,
                &paddr, &page_size, &access);

        qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n", __func__,
                vaddr, is_write, mmu_idx, paddr, ret);

        if (ret == 0) {
            tlb_set_page(env,
                    vaddr & TARGET_PAGE_MASK,
                    paddr & TARGET_PAGE_MASK,
                    access, mmu_idx, page_size);
        } else {
            /* non-zero ret is the architectural exception cause code */
            do_restore_state(retaddr);
            HELPER(exception_cause_vaddr)(env->pc, ret, vaddr);
        }
    }
    env = saved_env;
}
101 |
|
102 |
/* Raise a generic QEMU exception and exit the cpu loop; does not return. */
void HELPER(exception)(uint32_t excp)
{
    env->exception_index = excp;
    cpu_loop_exit(env);
}

/*
 * Raise an architectural exception with the given cause at pc.
 * Selects the double-exception vector when PS.EXCM is already set,
 * otherwise the user or kernel vector depending on PS.UM.
 * Sets EXCCAUSE and PS.EXCM; does not return.
 */
void HELPER(exception_cause)(uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        /* exception while handling an exception: save pc to DEPC when
         * the core has one, otherwise to EPC1
         */
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(vector);
}

/* As exception_cause, but also latches the faulting virtual address. */
void HELPER(exception_cause_vaddr)(uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(pc, cause);
}
136 |
|
137 |
/*
 * Raise a debug exception for new_env unless the current interrupt
 * level already masks the debug level.  Switches the global env to
 * new_env before raising.
 */
void debug_exception_env(CPUXtensaState *new_env, uint32_t cause)
{
    if (xtensa_get_cintlevel(new_env) < new_env->config->debug_level) {
        env = new_env;
        HELPER(debug_exception)(env->pc, cause);
    }
}

/*
 * Enter the debug exception: record the cause, save pc and PS into the
 * EPC/EPS registers of the debug interrupt level, set PS.EXCM and raise
 * PS.INTLEVEL to the debug level; does not return.
 */
void HELPER(debug_exception)(uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    /* EPC1 + level - 1 == EPC[level]; EPS2 + level - 2 == EPS[level] */
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(EXC_DEBUG);
}
157 |
|
158 |
uint32_t HELPER(nsa)(uint32_t v) |
159 |
{ |
160 |
if (v & 0x80000000) { |
161 |
v = ~v; |
162 |
} |
163 |
return v ? clz32(v) - 1 : 31; |
164 |
} |
165 |
|
166 |
uint32_t HELPER(nsau)(uint32_t v) |
167 |
{ |
168 |
return v ? clz32(v) : 32; |
169 |
} |
170 |
|
171 |
/*
 * Copy n registers from the physical register file, starting at phys
 * and wrapping around at nareg, into the live window at index window.
 */
static void copy_window_from_phys(CPUXtensaState *env,
        uint32_t window, uint32_t phys, uint32_t n)
{
    uint32_t nareg = env->config->nareg;
    uint32_t i;

    assert(phys < nareg);
    for (i = 0; i < n; ++i) {
        env->regs[window + i] = env->phys_regs[(phys + i) % nareg];
    }
}

/*
 * Copy n registers from the live window at index window into the
 * physical register file, starting at phys and wrapping at nareg.
 */
static void copy_phys_from_window(CPUXtensaState *env,
        uint32_t phys, uint32_t window, uint32_t n)
{
    uint32_t nareg = env->config->nareg;
    uint32_t i;

    assert(phys < nareg);
    for (i = 0; i < n; ++i) {
        env->phys_regs[(phys + i) % nareg] = env->regs[window + i];
    }
}
202 |
|
203 |
|
204 |
static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env) |
205 |
{ |
206 |
return a & (env->config->nareg / 4 - 1); |
207 |
} |
208 |
|
209 |
static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env) |
210 |
{ |
211 |
return 1 << windowbase_bound(a, env); |
212 |
} |
213 |
|
214 |
void xtensa_sync_window_from_phys(CPUXtensaState *env)
|
215 |
{ |
216 |
copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16); |
217 |
} |
218 |
|
219 |
void xtensa_sync_phys_from_window(CPUXtensaState *env)
|
220 |
{ |
221 |
copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16); |
222 |
} |
223 |
|
224 |
/*
 * Move the register window to an absolute WINDOW_BASE position.
 * The order is essential: the live window must be written back to the
 * physical register file before WINDOW_BASE changes and the new window
 * is loaded from it.
 */
static void rotate_window_abs(uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}

/* Rotate the window by delta positions (added modulo nareg/4). */
static void rotate_window(uint32_t delta)
{
    rotate_window_abs(env->sregs[WINDOW_BASE] + delta);
}

/* WSR.WINDOW_BASE: rotate the window to the written position. */
void HELPER(wsr_windowbase)(uint32_t v)
{
    rotate_window_abs(v);
}
240 |
|
241 |
/*
 * ENTRY instruction: allocate a new register window.
 * s is the stack pointer register number, imm the frame size in units
 * of 8 bytes.  Illegal when s > 3 or when window overflow detection is
 * off (PS.WOE clear or PS.EXCM set).
 */
void HELPER(entry)(uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
    if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log("Illegal entry instruction(pc = %08x), PS = %08x\n",
                pc, env->sregs[PS]);
        HELPER(exception_cause)(pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        /* write the decremented stack pointer into the register that
         * becomes a[s] after the window rotates by callinc
         */
        env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3);
        rotate_window(callinc);
        /* mark the new frame live */
        env->sregs[WINDOW_START] |=
            windowstart_bit(env->sregs[WINDOW_BASE], env);
    }
}
255 |
|
256 |
/*
 * Check whether the current window can grow to use registers up to
 * group w without clobbering a live caller frame; if not, rotate to the
 * offending frame and raise the matching window overflow exception.
 * No-op when window overflow detection is off (PS.WOE clear or PS.EXCM
 * set).
 */
void HELPER(window_check)(uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t m, n;

    if ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) {
        return;
    }

    /* find the nearest live frame above the current window; if none is
     * within w positions there is nothing to spill
     */
    for (n = 1; ; ++n) {
        if (n > w) {
            return;
        }
        if (windowstart & windowstart_bit(windowbase + n, env)) {
            break;
        }
    }

    /* overflow: rotate to the frame to be spilled, remember the old
     * base in PS.OWB and enter the exception with EPC1 = pc
     */
    m = windowbase_bound(windowbase + n, env);
    rotate_window(n);
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    /* the overflow size is the distance to the next live frame */
    if (windowstart & windowstart_bit(m + 1, env)) {
        HELPER(exception)(EXC_WINDOW_OVERFLOW4);
    } else if (windowstart & windowstart_bit(m + 2, env)) {
        HELPER(exception)(EXC_WINDOW_OVERFLOW8);
    } else {
        HELPER(exception)(EXC_WINDOW_OVERFLOW12);
    }
}
289 |
|
290 |
/*
 * RETW instruction: return through a windowed call.
 * n is the caller's window increment taken from a0 bits 31:30; m is the
 * increment implied by the nearest live frame in WINDOW_START below the
 * current base.  Returns the pc to jump to, or 0 when an exception is
 * raised instead.
 */
uint32_t HELPER(retw)(uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t ret_pc = 0;

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    /* illegal when a0 encodes no increment, when WINDOW_START disagrees
     * with a0, or when window overflow detection is off
     */
    if (n == 0 || (m != 0 && m != n) ||
            ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log("Illegal retw instruction(pc = %08x), "
                "PS = %08x, m = %d, n = %d\n",
                pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        int owb = windowbase;

        /* return address keeps the top 2 bits of the current pc */
        ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

        rotate_window(-n);
        if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
            /* caller frame is live: simply retire the returning frame */
            env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
        } else {
            /* window underflow: the caller frame was spilled; save the
             * old base in PS.OWB and raise the sized underflow exception
             */
            env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
                (windowbase << PS_OWB_SHIFT) | PS_EXCM;
            env->sregs[EPC1] = env->pc = pc;

            if (n == 1) {
                HELPER(exception)(EXC_WINDOW_UNDERFLOW4);
            } else if (n == 2) {
                HELPER(exception)(EXC_WINDOW_UNDERFLOW8);
            } else if (n == 3) {
                HELPER(exception)(EXC_WINDOW_UNDERFLOW12);
            }
        }
    }
    return ret_pc;
}
337 |
|
338 |
/* ROTW instruction: rotate the register window by imm4 positions. */
void HELPER(rotw)(uint32_t imm4)
{
    rotate_window(imm4);
}

/* Rotate the window back to the owner base saved in PS.OWB. */
void HELPER(restore_owb)(void)
{
    rotate_window_abs((env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}

/*
 * MOVSP instruction: raise an alloca exception when none of the three
 * window positions below the current base holds a live frame.
 */
void HELPER(movsp)(uint32_t pc)
{
    if ((env->sregs[WINDOW_START] &
            (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
             windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
             windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
        HELPER(exception_cause)(pc, ALLOCA_CAUSE);
    }
}
357 |
|
358 |
/*
 * WSR.LBEG: zero-overhead loop begin.  The TB covering the loop-end
 * address embeds the loop-back target, so it must be invalidated when
 * LBEG changes.
 */
void HELPER(wsr_lbeg)(uint32_t v)
{
    if (env->sregs[LBEG] != v) {
        tb_invalidate_phys_page_range(
                env->sregs[LEND] - 1, env->sregs[LEND], 0);
        env->sregs[LBEG] = v;
    }
}

/*
 * WSR.LEND: zero-overhead loop end.  Invalidate the TBs at both the
 * old loop-end address (before the write) and the new one (after).
 */
void HELPER(wsr_lend)(uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_phys_page_range(
                env->sregs[LEND] - 1, env->sregs[LEND], 0);
        env->sregs[LEND] = v;
        tb_invalidate_phys_page_range(
                env->sregs[LEND] - 1, env->sregs[LEND], 0);
    }
}

/* Debug helper: dump the CPU state to stderr. */
void HELPER(dump_state)(void)
{
    cpu_dump_state(env, stderr, fprintf, 0);
}
382 |
|
383 |
/*
 * WAITI instruction: set PS.INTLEVEL to intlevel and halt the CPU.
 * Exits the cpu loop immediately when an interrupt is already pending;
 * otherwise marks the CPU halted and leaves via EXCP_HLT.
 */
void HELPER(waiti)(uint32_t pc, uint32_t intlevel)
{
    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);
    check_interrupts(env);
    if (env->pending_irq_level) {
        cpu_loop_exit(env);
        return;
    }

    env->halt_clock = qemu_get_clock_ns(vm_clock);
    env->halted = 1;
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) {
        /* ensure a CCOMPARE interrupt can wake the halted CPU */
        xtensa_rearm_ccompare_timer(env);
    }
    HELPER(exception)(EXCP_HLT);
}

/* Assert or deassert timer interrupt line id. */
void HELPER(timer_irq)(uint32_t id, uint32_t active)
{
    xtensa_timer_irq(env, id, active);
}

/* Advance the CCOUNT cycle counter by d cycles. */
void HELPER(advance_ccount)(uint32_t d)
{
    xtensa_advance_ccount(env, d);
}

/* Re-evaluate pending interrupts for env. */
void HELPER(check_interrupts)(CPUXtensaState *env)
{
    check_interrupts(env);
}
416 |
|
417 |
void HELPER(wsr_rasid)(uint32_t v)
|
418 |
{ |
419 |
v = (v & 0xffffff00) | 0x1; |
420 |
if (v != env->sregs[RASID]) {
|
421 |
env->sregs[RASID] = v; |
422 |
tlb_flush(env, 1);
|
423 |
} |
424 |
} |
425 |
|
426 |
static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way) |
427 |
{ |
428 |
uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG]; |
429 |
|
430 |
switch (way) {
|
431 |
case 4: |
432 |
return (tlbcfg >> 16) & 0x3; |
433 |
|
434 |
case 5: |
435 |
return (tlbcfg >> 20) & 0x1; |
436 |
|
437 |
case 6: |
438 |
return (tlbcfg >> 24) & 0x1; |
439 |
|
440 |
default:
|
441 |
return 0; |
442 |
} |
443 |
} |
444 |
|
445 |
/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            /* base mask 0xfff00000, widened by the configured page size */
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            /* ways 0..3 and others: 4 KB pages */
            return 0xfffff000;
        }
    } else {
        /* no MMU: region protection granularity */
        return REGION_PAGE_MASK;
    }
}
480 |
|
481 |
/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        /* refill ways: index width depends on the number of entries */
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        /* way 4 holds 4 entries: drop 2 index bits from the addr mask */
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}
509 |
|
510 |
/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        /* the ITLB has at most 8 ways */
        wi &= 7;
    }

    if (wi < 4) {
        /* refill ways: index at bit 12, width depends on entry count */
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                /* index position follows the configured page size */
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}
564 |
|
565 |
/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        /* region protection: the index comes from the top 3 addr bits */
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

/*
 * Fetch the TLB entry addressed by v; when pwi is non-NULL the selected
 * way is reported through it.
 */
static xtensa_tlb_entry *get_tlb_entry(uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}
594 |
|
595 |
/* RxTLB0: read a TLB entry's VPN and ASID (MMU), or the region (no MMU). */
uint32_t HELPER(rtlb0)(uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

/* RxTLB1: read a TLB entry's physical address and attributes. */
uint32_t HELPER(rtlb1)(uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

/*
 * IITLB/IDTLB: invalidate the addressed TLB entry.  Only variable
 * entries with a non-zero ASID are affected.
 */
void HELPER(itlb)(uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            /* drop the cached QEMU mapping before deactivating */
            tlb_flush_page(env, entry->vaddr);
            entry->asid = 0;
        }
    }
}
623 |
|
624 |
/*
 * PITLB/PDTLB: probe the TLB for virtual address v.  On a hit visible
 * at the current ring, returns the VPN with the way and a hit bit; a
 * multi-hit raises an exception; otherwise returns 0.  Without an MMU,
 * returns the region number with the valid bit set.
 */
uint32_t HELPER(ptlb)(uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            /* report the hit only when the entry's ring is accessible */
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}
649 |
|
650 |
/*
 * Write TLB entry (wi, ei).  With an MMU only variable entries may be
 * written; the entry's ASID is selected from RASID by the ring encoded
 * in the pte.  Without an MMU only the attributes (and, with region
 * translation, the paddr) change.
 */
void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                /* the old mapping may still be cached by the QEMU TLB */
                tlb_flush_page(env, entry->vaddr);
            }
            entry->vaddr = vpn;
            entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
            entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
            entry->attr = pte & 0xf;
        } else {
            qemu_log("%s %d, %d, %d trying to set immutable entry\n",
                    __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(env, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                    XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

/* WITLB/WDTLB: write the TLB entry addressed by v with pte value p. */
void HELPER(wtlb)(uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}
686 |
|
687 |
|
688 |
/*
 * WSR.IBREAKENABLE: instruction breakpoints are baked into translated
 * code, so every toggled enable bit requires invalidating the TB at the
 * corresponding IBREAKA address.  Bits above nibreak are masked off.
 */
void HELPER(wsr_ibreakenable)(uint32_t v)
{
    uint32_t change = v ^ env->sregs[IBREAKENABLE];
    unsigned i;

    for (i = 0; i < env->config->nibreak; ++i) {
        if (change & (1 << i)) {
            tb_invalidate_phys_page_range(
                    env->sregs[IBREAKA + i], env->sregs[IBREAKA + i] + 1, 0);
        }
    }
    env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
}

/*
 * WSR.IBREAKA[i]: when breakpoint i is enabled and its address changes,
 * invalidate the TBs at both the old and the new address.
 */
void HELPER(wsr_ibreaka)(uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_phys_page_range(
                env->sregs[IBREAKA + i], env->sregs[IBREAKA + i] + 1, 0);
        tb_invalidate_phys_page_range(v, v + 1, 0);
    }
    env->sregs[IBREAKA + i] = v;
}
711 |
|
712 |
/*
 * (Re)install data breakpoint i as a QEMU watchpoint.  The watched
 * region is dbreaka masked by the DBREAKC mask; read/write flags come
 * from the DBREAKC load/store bits.  A previously installed watchpoint
 * for slot i is removed first.
 */
static void set_dbreak(unsigned i, uint32_t dbreaka, uint32_t dbreakc)
{
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log("DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(env, dbreaka & mask, ~mask + 1,
                flags, &env->cpu_watchpoint[i])) {
        /* insertion failed: make sure the slot does not keep a stale ref */
        env->cpu_watchpoint[i] = NULL;
        qemu_log("Failed to set data breakpoint at 0x%08x/%d\n",
                dbreaka & mask, ~mask + 1);
    }
}
739 |
|
740 |
/*
 * WSR.DBREAKA[i]: move data breakpoint i when it is active (DBREAKC has
 * a load or store bit set) and the address actually changes.
 */
void HELPER(wsr_dbreaka)(uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
            env->sregs[DBREAKA + i] != v) {
        set_dbreak(i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}

/*
 * WSR.DBREAKC[i]: reconfigure or remove data breakpoint i when the
 * enable bits or the mask change; otherwise just store the value.
 */
void HELPER(wsr_dbreakc)(uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(i, env->sregs[DBREAKA + i], v);
        } else {
            /* both load and store disabled: drop the watchpoint */
            if (env->cpu_watchpoint[i]) {
                cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}