root / target-xtensa / helper.c @ b3ce604e
History | View | Annotate | Download (19.3 kB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
|
3 |
* All rights reserved.
|
4 |
*
|
5 |
* Redistribution and use in source and binary forms, with or without
|
6 |
* modification, are permitted provided that the following conditions are met:
|
7 |
* * Redistributions of source code must retain the above copyright
|
8 |
* notice, this list of conditions and the following disclaimer.
|
9 |
* * Redistributions in binary form must reproduce the above copyright
|
10 |
* notice, this list of conditions and the following disclaimer in the
|
11 |
* documentation and/or other materials provided with the distribution.
|
12 |
* * Neither the name of the Open Source and Linux Lab nor the
|
13 |
* names of its contributors may be used to endorse or promote products
|
14 |
* derived from this software without specific prior written permission.
|
15 |
*
|
16 |
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
17 |
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
18 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
19 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
20 |
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
21 |
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
22 |
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
23 |
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24 |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
25 |
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26 |
*/
|
27 |
|
28 |
#include "cpu.h" |
29 |
#include "exec-all.h" |
30 |
#include "gdbstub.h" |
31 |
#include "host-utils.h" |
32 |
#if !defined(CONFIG_USER_ONLY)
|
33 |
#include "hw/loader.h" |
34 |
#endif
|
35 |
|
36 |
static struct XtensaConfigList *xtensa_cores; |
37 |
|
38 |
void xtensa_register_core(XtensaConfigList *node)
|
39 |
{ |
40 |
node->next = xtensa_cores; |
41 |
xtensa_cores = node; |
42 |
} |
43 |
|
44 |
/*
 * Scan the CPU's data-break watchpoints for one that has fired.
 * Returns a DEBUGCAUSE value with the DB bit set and the index of the
 * first hit watchpoint encoded in the DBNUM field, or 0 when none of
 * the configured watchpoints was hit.
 */
static uint32_t check_hw_breakpoints(CPUXtensaState *env)
{
    unsigned i;

    for (i = 0; i < env->config->ndbreak; ++i) {
        if (env->cpu_watchpoint[i] &&
                env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
            return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
        }
    }
    return 0;
}
56 |
|
57 |
/* Debug-exception handler that was installed before ours; chained to below. */
static CPUDebugExcpHandler *prev_debug_excp_handler;

/*
 * Debug exception hook.  If the stop was caused by one of the CPU's
 * own hardware watchpoints (BP_CPU), raise the corresponding xtensa
 * debug exception and restart execution; otherwise fall through to the
 * previously installed handler (e.g. the gdbstub's).
 */
static void breakpoint_handler(CPUXtensaState *env)
{
    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            uint32_t cause;

            /* Clear the hit marker before re-entering the guest. */
            env->watchpoint_hit = NULL;
            cause = check_hw_breakpoints(env);
            if (cause) {
                debug_exception_env(env, cause);
            }
            /* Re-enters the execution loop; control does not return here. */
            cpu_resume_from_signal(env, NULL);
        }
    }
    if (prev_debug_excp_handler) {
        prev_debug_excp_handler(env);
    }
}
77 |
|
78 |
XtensaCPU *cpu_xtensa_init(const char *cpu_model) |
79 |
{ |
80 |
static int tcg_inited; |
81 |
static int debug_handler_inited; |
82 |
XtensaCPU *cpu; |
83 |
CPUXtensaState *env; |
84 |
const XtensaConfig *config = NULL; |
85 |
XtensaConfigList *core = xtensa_cores; |
86 |
|
87 |
for (; core; core = core->next)
|
88 |
if (strcmp(core->config->name, cpu_model) == 0) { |
89 |
config = core->config; |
90 |
break;
|
91 |
} |
92 |
|
93 |
if (config == NULL) { |
94 |
return NULL; |
95 |
} |
96 |
|
97 |
cpu = XTENSA_CPU(object_new(TYPE_XTENSA_CPU)); |
98 |
env = &cpu->env; |
99 |
env->config = config; |
100 |
|
101 |
if (!tcg_inited) {
|
102 |
tcg_inited = 1;
|
103 |
xtensa_translate_init(); |
104 |
} |
105 |
|
106 |
if (!debug_handler_inited && tcg_enabled()) {
|
107 |
debug_handler_inited = 1;
|
108 |
prev_debug_excp_handler = |
109 |
cpu_set_debug_excp_handler(breakpoint_handler); |
110 |
} |
111 |
|
112 |
xtensa_irq_init(env); |
113 |
qemu_init_vcpu(env); |
114 |
return cpu;
|
115 |
} |
116 |
|
117 |
|
118 |
/* Print the names of all registered xtensa cores to f. */
void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    const XtensaConfigList *it;

    cpu_fprintf(f, "Available CPUs:\n");
    for (it = xtensa_cores; it != NULL; it = it->next) {
        cpu_fprintf(f, "  %s\n", it->config->name);
    }
}
126 |
|
127 |
/*
 * Debugger address translation: map a guest virtual address to its
 * physical address without modifying TLB state (update_tlb == false).
 * Returns ~0 when the address is not currently mapped.
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUXtensaState *env, target_ulong addr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    /* Try as a data load (is_write == 0) first, then as an instruction
     * fetch (is_write == 2): the address may be mapped by either the
     * DTLB or the ITLB. */
    if (xtensa_get_physical_addr(env, false, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(env, false, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}
143 |
|
144 |
/*
 * Apply VECBASE relocation to a vector address.  When the relocatable
 * vector option is configured the vector's offset from the configured
 * base is rebased onto the VECBASE special register; otherwise the
 * vector address is used unchanged.
 */
static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
{
    bool relocatable = xtensa_option_enabled(env->config,
            XTENSA_OPTION_RELOCATABLE_VECTOR);

    if (!relocatable) {
        return vector;
    }
    return vector - env->config->vecbase + env->sregs[VECBASE];
}
153 |
|
154 |
/*!
 * Handle pending IRQ.
 * For the high priority interrupt jump to the corresponding interrupt vector.
 * For the level-1 interrupt convert it to either user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    /* Take the IRQ only if its level is above the current interrupt
     * level, within the configured range, and at least one interrupt
     * of that level is both pending (INTSET) and enabled (INTENABLE). */
    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
            /* High priority interrupt: save pc/PS in the EPCn/EPSn pair
             * for this level and jump straight to its vector.  EPC1..EPCn
             * and EPS2..EPSn are contiguous sregs, hence the
             * "+ level - 1" / "+ level - 2" indexing. */
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            /* Level-1 interrupt: delivered as an ordinary exception. */
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                /* Already handling an exception: escalate to a double
                 * exception; pc goes to DEPC when the core has one. */
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}
196 |
|
197 |
/*
 * Deliver the exception recorded in env->exception_index.  A pending
 * IRQ is first converted by handle_interrupt() into either a direct
 * high-priority vector jump or a user/kernel/double exception; known
 * exceptions are then redirected to their configured vectors.  Pending
 * interrupts are re-evaluated at the end.
 */
void do_interrupt(CPUXtensaState *env)
{
    if (env->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        /* May overwrite exception_index with EXC_USER/KERNEL/DOUBLE,
         * or take a high-priority interrupt directly. */
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        /* Only jump if the core actually configures this vector. */
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    case EXC_IRQ:
        /* IRQ that could not be taken now stays pending; nothing to do. */
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}
247 |
|
248 |
static void reset_tlb_mmu_all_ways(CPUXtensaState *env, |
249 |
const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
|
250 |
{ |
251 |
unsigned wi, ei;
|
252 |
|
253 |
for (wi = 0; wi < tlb->nways; ++wi) { |
254 |
for (ei = 0; ei < tlb->way_size[wi]; ++ei) { |
255 |
entry[wi][ei].asid = 0;
|
256 |
entry[wi][ei].variable = true;
|
257 |
} |
258 |
} |
259 |
} |
260 |
|
261 |
/*
 * Initialize MMU ways 5 and 6.  When the ways are not configurable
 * (varway56 off) the fixed constant entries below are installed; when
 * they are configurable, way 6 is filled with identity mappings of
 * the eight 512MB regions (way 5 is left as reset by
 * reset_tlb_mmu_all_ways).
 */
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        /* Way 5: two fixed mappings of 0xd0000000/0xd8000000 onto
         * physical 0, differing only in attr (7 vs 3). */
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        /* Way 6: two fixed mappings of the top of the address space
         * onto physical 0xf0000000. */
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        /* Identity-map each 512MB region in way 6. */
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}
307 |
|
308 |
static void reset_tlb_region_way0(CPUXtensaState *env, |
309 |
xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE]) |
310 |
{ |
311 |
unsigned ei;
|
312 |
|
313 |
for (ei = 0; ei < 8; ++ei) { |
314 |
entry[0][ei].vaddr = ei << 29; |
315 |
entry[0][ei].paddr = ei << 29; |
316 |
entry[0][ei].asid = 1; |
317 |
entry[0][ei].attr = 2; |
318 |
entry[0][ei].variable = true; |
319 |
} |
320 |
} |
321 |
|
322 |
/*
 * Put the memory-management state into its reset configuration:
 * either a full MMU (TLBs invalidated, fixed ways 5/6 installed,
 * RASID/TLBCFG at reset values) or region protection (way 0 reset).
 */
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        /* RASID reset value assigns ASIDs 1..4 to rings 0..3. */
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}
338 |
|
339 |
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid) |
340 |
{ |
341 |
unsigned i;
|
342 |
for (i = 0; i < 4; ++i) { |
343 |
if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) { |
344 |
return i;
|
345 |
} |
346 |
} |
347 |
return 0xff; |
348 |
} |
349 |
|
350 |
/*!
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;

        /* Each way indexes its entry with a way-specific slice of the
         * address; compare the resulting VPN tag against the entry. */
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            /* An entry only matches when its ASID is currently mapped
             * to a ring via RASID. */
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                /* More than one matching entry is a multi-hit fault. */
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}
391 |
|
392 |
/*!
|
393 |
* Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
|
394 |
* See ISA, 4.6.5.10
|
395 |
*/
|
396 |
static unsigned mmu_attr_to_access(uint32_t attr) |
397 |
{ |
398 |
unsigned access = 0; |
399 |
if (attr < 12) { |
400 |
access |= PAGE_READ; |
401 |
if (attr & 0x1) { |
402 |
access |= PAGE_EXEC; |
403 |
} |
404 |
if (attr & 0x2) { |
405 |
access |= PAGE_WRITE; |
406 |
} |
407 |
} else if (attr == 13) { |
408 |
access |= PAGE_READ | PAGE_WRITE; |
409 |
} |
410 |
return access;
|
411 |
} |
412 |
|
413 |
/*!
|
414 |
* Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
|
415 |
* See ISA, 4.6.3.3
|
416 |
*/
|
417 |
static unsigned region_attr_to_access(uint32_t attr) |
418 |
{ |
419 |
unsigned access = 0; |
420 |
if ((attr < 6 && attr != 3) || attr == 14) { |
421 |
access |= PAGE_READ | PAGE_WRITE; |
422 |
} |
423 |
if (attr > 0 && attr < 6) { |
424 |
access |= PAGE_EXEC; |
425 |
} |
426 |
return access;
|
427 |
} |
428 |
|
429 |
static bool is_access_granted(unsigned access, int is_write) |
430 |
{ |
431 |
switch (is_write) {
|
432 |
case 0: |
433 |
return access & PAGE_READ;
|
434 |
|
435 |
case 1: |
436 |
return access & PAGE_WRITE;
|
437 |
|
438 |
case 2: |
439 |
return access & PAGE_EXEC;
|
440 |
|
441 |
default:
|
442 |
return 0; |
443 |
} |
444 |
} |
445 |
|
446 |
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte); |
447 |
|
448 |
/*
 * MMU translation of vaddr.  On a TLB miss, optionally walk the page
 * table (may_lookup_pt) and, when update_tlb is set, refill one of the
 * way-0..3 autorefill slots in round-robin order; otherwise build a
 * temporary entry without touching TLB state.  Then apply the ring
 * privilege check and the rights derived from the entry's attributes.
 * Returns 0 on success, exception cause code otherwise.
 */
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access,
        bool may_lookup_pt)
{
    /* Instruction fetches (is_write == 2) go through the ITLB,
     * everything else through the DTLB. */
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
        /* PTE found: its bits 4..5 carry the ring. */
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            /* Round-robin refill of the four autorefill ways. */
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                    __func__, vaddr, vpn, pte);
        } else {
            /* Debug/probe path: fill a local entry only. */
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    /* Privilege check: the access's ring must reach mmu_idx. */
    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    /* Combine the entry's frame with the page-offset bits of vaddr. */
    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
509 |
|
510 |
/*
 * Fetch the PTE for vaddr from the in-memory page table located via
 * the PTEVADDR special register.  The page-table address is itself
 * translated through the MMU, but with may_lookup_pt == false so the
 * walk cannot recurse.
 * Returns 0 and stores the PTE on success, exception cause otherwise.
 */
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    /* PTE address: PTEVADDR base plus word index of vaddr's page. */
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
            &paddr, &page_size, &access, false);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        *pte = ldl_phys(paddr);
    }
    return ret;
}
528 |
|
529 |
/*
 * Region-protection translation: the entry in way 0 is selected by the
 * top three bits of vaddr (fixed 512MB regions).  Checks access rights
 * and composes the physical address.
 * Returns 0 on success, exception cause code otherwise.
 */
static int get_physical_addr_region(CPUXtensaState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t region = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, 0, region);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        if (!dtlb) {
            return INST_FETCH_PROHIBITED_CAUSE;
        }
        return is_write ? STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}
553 |
|
554 |
/*!
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        /* Full MMU: page-table walk allowed (may_lookup_pt == true). */
        return get_physical_addr_mmu(env, update_tlb,
                vaddr, is_write, mmu_idx, paddr, page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        /* No translation hardware: identity map, all accesses allowed. */
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }
}
579 |
|
580 |
/*
 * Print all valid (non-zero ASID) entries of one TLB to f, grouped by
 * way, with a header per way showing its page size.  Attribute decoding
 * follows the core's translation model (MMU vs region protection).
 */
static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        /* Page size of this way, derived from its address mask. */
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz >>= 20;
            sz_text = "MB";
        } else {
            sz >>= 10;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                unsigned access = attr_to_access(entry->attr);

                /* Print the way header lazily, only when the way has
                 * at least one valid entry. */
                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr      Paddr      ASID Attr RWX\n"
                            "\t---------- ---------- ---- ---- ---\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-');
            }
        }
    }
}
630 |
|
631 |
/* Dump both TLBs to f, or note that the core has no TLB at all. */
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    bool has_tlb = xtensa_option_bits_enabled(env->config,
            XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
            XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
            XTENSA_OPTION_BIT(XTENSA_OPTION_MMU));

    if (!has_tlb) {
        cpu_fprintf(f, "No TLB for this CPU core\n");
        return;
    }

    cpu_fprintf(f, "ITLB:\n");
    dump_tlb(f, cpu_fprintf, env, false);
    cpu_fprintf(f, "\nDTLB:\n");
    dump_tlb(f, cpu_fprintf, env, true);
}