root / target-xtensa / helper.c @ a8170e5e
History | View | Annotate | Download (19.1 kB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
|
3 |
* All rights reserved.
|
4 |
*
|
5 |
* Redistribution and use in source and binary forms, with or without
|
6 |
* modification, are permitted provided that the following conditions are met:
|
7 |
* * Redistributions of source code must retain the above copyright
|
8 |
* notice, this list of conditions and the following disclaimer.
|
9 |
* * Redistributions in binary form must reproduce the above copyright
|
10 |
* notice, this list of conditions and the following disclaimer in the
|
11 |
* documentation and/or other materials provided with the distribution.
|
12 |
* * Neither the name of the Open Source and Linux Lab nor the
|
13 |
* names of its contributors may be used to endorse or promote products
|
14 |
* derived from this software without specific prior written permission.
|
15 |
*
|
16 |
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
17 |
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
18 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
19 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
20 |
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
21 |
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
22 |
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
23 |
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24 |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
25 |
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26 |
*/
|
27 |
|
28 |
#include "cpu.h" |
29 |
#include "exec-all.h" |
30 |
#include "gdbstub.h" |
31 |
#include "host-utils.h" |
32 |
#if !defined(CONFIG_USER_ONLY)
|
33 |
#include "hw/loader.h" |
34 |
#endif
|
35 |
|
36 |
static struct XtensaConfigList *xtensa_cores; |
37 |
|
38 |
/*
 * Add a core configuration to the global list of known cores.
 * The node is pushed onto the front of the singly-linked list,
 * so later registrations shadow nothing but are found first.
 */
void xtensa_register_core(XtensaConfigList *node)
{
    node->next = xtensa_cores;
    xtensa_cores = node;
}
43 |
|
44 |
/*
 * Scan the configured data-break watchpoints for one that has fired.
 * Returns a DEBUGCAUSE value with DB set and the slot number encoded
 * in the DBNUM field, or 0 if no watchpoint was hit.
 */
static uint32_t check_hw_breakpoints(CPUXtensaState *env)
{
    unsigned slot;

    for (slot = 0; slot < env->config->ndbreak; slot++) {
        CPUWatchpoint *wp = env->cpu_watchpoint[slot];

        if (wp != NULL && (wp->flags & BP_WATCHPOINT_HIT)) {
            return DEBUGCAUSE_DB | (slot << DEBUGCAUSE_DBNUM_SHIFT);
        }
    }
    return 0;
}
56 |
|
57 |
/*
 * Debug-exception hook installed via cpu_set_debug_excp_handler().
 * When a CPU-owned watchpoint was hit, raise the corresponding
 * xtensa debug exception and restart execution.
 */
static void breakpoint_handler(CPUXtensaState *env)
{
    uint32_t cause;

    if (env->watchpoint_hit == NULL) {
        return;
    }
    if (!(env->watchpoint_hit->flags & BP_CPU)) {
        /* Not a CPU watchpoint (e.g. gdb-owned) -- leave it to others.  */
        return;
    }

    env->watchpoint_hit = NULL;
    cause = check_hw_breakpoints(env);
    if (cause) {
        debug_exception_env(env, cause);
    }
    /* Re-enter the guest; does not return here.  */
    cpu_resume_from_signal(env, NULL);
}
72 |
|
73 |
XtensaCPU *cpu_xtensa_init(const char *cpu_model) |
74 |
{ |
75 |
static int tcg_inited; |
76 |
static int debug_handler_inited; |
77 |
XtensaCPU *cpu; |
78 |
CPUXtensaState *env; |
79 |
const XtensaConfig *config = NULL; |
80 |
XtensaConfigList *core = xtensa_cores; |
81 |
|
82 |
for (; core; core = core->next)
|
83 |
if (strcmp(core->config->name, cpu_model) == 0) { |
84 |
config = core->config; |
85 |
break;
|
86 |
} |
87 |
|
88 |
if (config == NULL) { |
89 |
return NULL; |
90 |
} |
91 |
|
92 |
cpu = XTENSA_CPU(object_new(TYPE_XTENSA_CPU)); |
93 |
env = &cpu->env; |
94 |
env->config = config; |
95 |
|
96 |
if (!tcg_inited) {
|
97 |
tcg_inited = 1;
|
98 |
xtensa_translate_init(); |
99 |
} |
100 |
|
101 |
if (!debug_handler_inited && tcg_enabled()) {
|
102 |
debug_handler_inited = 1;
|
103 |
cpu_set_debug_excp_handler(breakpoint_handler); |
104 |
} |
105 |
|
106 |
xtensa_irq_init(env); |
107 |
qemu_init_vcpu(env); |
108 |
return cpu;
|
109 |
} |
110 |
|
111 |
|
112 |
/*
 * Print the names of all registered core models, one per line,
 * using the provided fprintf-like callback.
 */
void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *node;

    cpu_fprintf(f, "Available CPUs:\n");
    for (node = xtensa_cores; node; node = node->next) {
        cpu_fprintf(f, " %s\n", node->config->name);
    }
}
120 |
|
121 |
/*
 * Debug (gdb) translation of a virtual address to a physical one.
 * Tries a data access (is_write == 0) first, then an instruction
 * fetch (is_write == 2), without updating the TLB.  Returns ~0 when
 * neither translation succeeds.
 */
hwaddr cpu_get_phys_page_debug(CPUXtensaState *env, target_ulong addr)
{
    static const int access_type[] = { 0, 2 };   /* data read, ifetch */
    unsigned i;

    for (i = 0; i < ARRAY_SIZE(access_type); ++i) {
        uint32_t paddr;
        uint32_t page_size;
        unsigned access;

        if (xtensa_get_physical_addr(env, false, addr, access_type[i], 0,
                    &paddr, &page_size, &access) == 0) {
            return paddr;
        }
    }
    return ~0;
}
137 |
|
138 |
/*
 * Apply VECBASE relocation to an exception/interrupt vector address.
 * With the relocatable-vector option enabled, the vector is rebased
 * from the configured vecbase onto the current VECBASE sreg; without
 * it, the configured address is used as-is.
 */
static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
{
    if (!xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector;
    }
    return vector - env->config->vecbase + env->sregs[VECBASE];
}
147 |
|
148 |
/*!
 * Handle pending IRQ.
 * For the high priority interrupt jump to the corresponding interrupt vector.
 * For the level-1 interrupt convert it to either user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    /* Only take the interrupt if it is above the current interrupt level,
     * within the configured number of levels, and at least one interrupt
     * at that level is both pending (INTSET) and enabled (INTENABLE).
     */
    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
            /* High-priority interrupt: save PC/PS into the per-level
             * EPCn/EPSn registers (valid for level >= 2) and jump
             * directly to that level's interrupt vector.
             */
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            /* Level-1 interrupt: reported as a regular exception.  */
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                /* Already inside an exception: escalate to double
                 * exception.  The return PC goes to DEPC when the core
                 * has one, otherwise to EPC1.
                 */
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                /* Normal case: user or kernel exception depending on
                 * the PS.UM bit.
                 */
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}
190 |
|
191 |
/* Top-level exception/interrupt dispatcher.
 * Converts a pending EXC_IRQ into a concrete exception via
 * handle_interrupt(), then redirects the PC to the configured vector
 * for the resulting exception index.  Finishes by re-evaluating
 * pending interrupts.
 */
void do_interrupt(CPUXtensaState *env)
{
    if (env->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        /* May rewrite env->exception_index (EXC_USER/KERNEL/DOUBLE)
         * or jump directly to a high-priority vector.
         */
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        /* Vector address 0 means the core does not implement this
         * exception vector.
         */
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    case EXC_IRQ:
        /* handle_interrupt() left it untaken (masked or out of range).  */
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}
241 |
|
242 |
static void reset_tlb_mmu_all_ways(CPUXtensaState *env, |
243 |
const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
|
244 |
{ |
245 |
unsigned wi, ei;
|
246 |
|
247 |
for (wi = 0; wi < tlb->nways; ++wi) { |
248 |
for (ei = 0; ei < tlb->way_size[wi]; ++ei) { |
249 |
entry[wi][ei].asid = 0;
|
250 |
entry[wi][ei].variable = true;
|
251 |
} |
252 |
} |
253 |
} |
254 |
|
255 |
/* Initialize TLB ways 5 and 6 to their reset state.
 * Without the variable-way-5/6 option these ways hold fixed mappings
 * (identity/static translations for the 0xd0000000 and 0xe0000000+
 * regions); with it, way 6 is filled with eight identity-mapped
 * 512 MB entries.
 */
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        /* Fixed way 5: two entries mapping 0xd0000000/0xd8000000
         * to physical 0 with different attributes.
         */
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        /* Fixed way 6: 0xe0000000 and 0xf0000000 both map to
         * physical 0xf0000000.
         */
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        /* Variable way 6: identity map the whole 4 GB space in
         * eight 512 MB chunks.
         */
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}
301 |
|
302 |
static void reset_tlb_region_way0(CPUXtensaState *env, |
303 |
xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE]) |
304 |
{ |
305 |
unsigned ei;
|
306 |
|
307 |
for (ei = 0; ei < 8; ++ei) { |
308 |
entry[0][ei].vaddr = ei << 29; |
309 |
entry[0][ei].paddr = ei << 29; |
310 |
entry[0][ei].asid = 1; |
311 |
entry[0][ei].attr = 2; |
312 |
entry[0][ei].variable = true; |
313 |
} |
314 |
} |
315 |
|
316 |
/* Reset the MMU/region-protection state to its architectural defaults.
 * With the full MMU option: reset RASID/ITLBCFG/DTLBCFG, the autorefill
 * round-robin index, and all TLB ways (ways 5/6 get their fixed reset
 * mappings).  Without it: reset way 0 of both region-protection TLBs.
 */
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        /* Rings 0..3 map to ASIDs 1..4 after reset.  */
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}
332 |
|
333 |
/*
 * Map an ASID to its ring number by matching it against the four
 * byte fields of the RASID special register (ring i lives in byte i).
 * Returns 0xff when the ASID is not present in RASID.
 */
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    uint32_t rasid = env->sregs[RASID];
    unsigned ring;

    for (ring = 0; ring < 4; ring++, rasid >>= 8) {
        if ((rasid & 0xff) == asid) {
            return ring;
        }
    }
    return 0xff;
}
343 |
|
344 |
/*!
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    /* Probe every way; each way has exactly one candidate entry for
     * the address (computed by split_tlb_entry_spec_way).
     */
    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        /* An entry matches when its VPN equals the address's VPN and
         * its ASID is nonzero (zero ASID marks an invalid entry).
         */
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                /* More than one valid match is an architectural
                 * multi-hit error.
                 */
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}
385 |
|
386 |
/*!
|
387 |
* Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
|
388 |
* See ISA, 4.6.5.10
|
389 |
*/
|
390 |
static unsigned mmu_attr_to_access(uint32_t attr) |
391 |
{ |
392 |
unsigned access = 0; |
393 |
if (attr < 12) { |
394 |
access |= PAGE_READ; |
395 |
if (attr & 0x1) { |
396 |
access |= PAGE_EXEC; |
397 |
} |
398 |
if (attr & 0x2) { |
399 |
access |= PAGE_WRITE; |
400 |
} |
401 |
} else if (attr == 13) { |
402 |
access |= PAGE_READ | PAGE_WRITE; |
403 |
} |
404 |
return access;
|
405 |
} |
406 |
|
407 |
/*!
|
408 |
* Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
|
409 |
* See ISA, 4.6.3.3
|
410 |
*/
|
411 |
static unsigned region_attr_to_access(uint32_t attr) |
412 |
{ |
413 |
unsigned access = 0; |
414 |
if ((attr < 6 && attr != 3) || attr == 14) { |
415 |
access |= PAGE_READ | PAGE_WRITE; |
416 |
} |
417 |
if (attr > 0 && attr < 6) { |
418 |
access |= PAGE_EXEC; |
419 |
} |
420 |
return access;
|
421 |
} |
422 |
|
423 |
static bool is_access_granted(unsigned access, int is_write) |
424 |
{ |
425 |
switch (is_write) {
|
426 |
case 0: |
427 |
return access & PAGE_READ;
|
428 |
|
429 |
case 1: |
430 |
return access & PAGE_WRITE;
|
431 |
|
432 |
case 2: |
433 |
return access & PAGE_EXEC;
|
434 |
|
435 |
default:
|
436 |
return 0; |
437 |
} |
438 |
} |
439 |
|
440 |
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte); |
441 |
|
442 |
/* MMU translation of vaddr for the given access kind and ring.
 * On a TLB miss (and when may_lookup_pt allows it) performs a
 * hardware-style page-table walk via get_pte(); with update_tlb the
 * result is installed into one of the four autorefill ways, otherwise
 * a temporary entry is used for this translation only.
 * \return 0 on success, exception cause code otherwise.
 */
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access,
        bool may_lookup_pt)
{
    /* Instruction fetches (is_write == 2) go through the ITLB,
     * everything else through the DTLB.
     */
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    /* TLB miss: try to refill from the page table.  */
    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
        /* PTE bits 4..5 hold the ring of the mapping.  */
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            /* Round-robin over the four autorefill ways.  */
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                    __func__, vaddr, vpn, pte);
        } else {
            /* Debug/read-only translation: fill a local entry without
             * touching TLB state.
             */
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    /* Ring/privilege check: the entry's ring must be at least as
     * privileged as the current mmu_idx.
     */
    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    /* Combine the entry's physical page with the in-page offset;
     * the way's address mask determines the page size.
     */
    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
503 |
|
504 |
/* Fetch the page-table entry for vaddr.
 * The PTE virtual address is formed from PTEVADDR and the page number
 * of vaddr; it is translated with pagewalk disabled (may_lookup_pt =
 * false) to avoid recursion, then the PTE is loaded from physical
 * memory.
 * \return 0 on success, exception cause code otherwise.
 */
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    /* PTEVADDR | (vpn index * 4), word-aligned.  */
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
            &paddr, &page_size, &access, false);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        *pte = ldl_phys(paddr);
    }
    return ret;
}
522 |
|
523 |
/* Region-protection/translation lookup for vaddr.
 * The region is selected by the top three address bits (eight fixed
 * 512 MB regions in way 0); only the access-rights check can fail.
 * \return 0 on success, exception cause code otherwise.
 */
static int get_physical_addr_region(CPUXtensaState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    /* Region index = bits 31..29 of the address.  */
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}
547 |
|
548 |
/*!
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    /* Dispatch on the core's memory-protection model.  */
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                vaddr, is_write, mmu_idx, paddr, page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        /* No translation hardware: identity mapping, full access.  */
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }
}
573 |
|
574 |
/* Pretty-print the valid entries of one TLB (ITLB or DTLB) way by way.
 * A way header with the entry size is printed lazily, only when the
 * way contains at least one valid (nonzero-ASID) entry.
 */
static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    /* Attribute decoding depends on whether this core has a full MMU
     * or only region protection.
     */
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        /* Entry size for this way, derived from its address mask.  */
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz >>= 20;
            sz_text = "MB";
        } else {
            sz >>= 10;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                unsigned access = attr_to_access(entry->attr);

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr Paddr ASID Attr RWX\n"
                            "\t---------- ---------- ---- ---- ---\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-');
            }
        }
    }
}
624 |
|
625 |
/*
 * Dump both TLBs for the 'info tlb' style monitor commands, or a
 * short note when the core has no translation/protection hardware.
 */
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    bool has_tlb = xtensa_option_bits_enabled(env->config,
            XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
            XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
            XTENSA_OPTION_BIT(XTENSA_OPTION_MMU));

    if (!has_tlb) {
        cpu_fprintf(f, "No TLB for this CPU core\n");
        return;
    }

    cpu_fprintf(f, "ITLB:\n");
    dump_tlb(f, cpu_fprintf, env, false);
    cpu_fprintf(f, "\nDTLB:\n");
    dump_tlb(f, cpu_fprintf, env, true);
}