/* target-xtensa/op_helper.c @ 7d890b40 */
1 |
/*
|
---|---|
2 |
* Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
|
3 |
* All rights reserved.
|
4 |
*
|
5 |
* Redistribution and use in source and binary forms, with or without
|
6 |
* modification, are permitted provided that the following conditions are met:
|
7 |
* * Redistributions of source code must retain the above copyright
|
8 |
* notice, this list of conditions and the following disclaimer.
|
9 |
* * Redistributions in binary form must reproduce the above copyright
|
10 |
* notice, this list of conditions and the following disclaimer in the
|
11 |
* documentation and/or other materials provided with the distribution.
|
12 |
* * Neither the name of the Open Source and Linux Lab nor the
|
13 |
* names of its contributors may be used to endorse or promote products
|
14 |
* derived from this software without specific prior written permission.
|
15 |
*
|
16 |
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
17 |
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
18 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
19 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
20 |
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
21 |
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
22 |
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
23 |
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24 |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
25 |
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26 |
*/
|
27 |
|
28 |
#include "cpu.h" |
29 |
#include "dyngen-exec.h" |
30 |
#include "helpers.h" |
31 |
#include "host-utils.h" |
32 |
|
33 |
/* Forward declaration: softmmu_template.h references this hook when
 * ALIGNED_ONLY is defined; the definition follows below. */
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
        void *retaddr);

/* Xtensa loads/stores trap on misalignment (no transparent fixup),
 * so request the aligned-only softmmu accessors. */
#define ALIGNED_ONLY
#define MMUSUFFIX _mmu

/* Instantiate the softmmu load/store helpers for each access size:
 * SHIFT 0/1/2/3 -> 1/2/4/8-byte accesses. */
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
50 |
|
51 |
static void do_restore_state(void *pc_ptr) |
52 |
{ |
53 |
TranslationBlock *tb; |
54 |
uint32_t pc = (uint32_t)(intptr_t)pc_ptr; |
55 |
|
56 |
tb = tb_find_pc(pc); |
57 |
if (tb) {
|
58 |
cpu_restore_state(tb, env, pc); |
59 |
} |
60 |
} |
61 |
|
62 |
/* Softmmu callback for a misaligned guest access.  Raises a
 * LoadStoreAlignment exception only for cores configured with the
 * unaligned-exception option and without hardware alignment. */
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
        void *retaddr)
{
    if (!xtensa_option_enabled(env->config,
                XTENSA_OPTION_UNALIGNED_EXCEPTION) ||
            xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        return;
    }
    /* Recover the faulting guest PC before raising the exception. */
    do_restore_state(retaddr);
    HELPER(exception_cause_vaddr)(
            env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
}
72 |
|
73 |
/*
 * Softmmu TLB-miss handler: translate vaddr and either install the
 * mapping in the QEMU TLB or raise the MMU exception reported by
 * xtensa_get_physical_addr.
 *
 * May be called from outside translated code, so the global env is
 * saved and switched to cpu_single_env for the duration of the call.
 */
void tlb_fill(target_ulong vaddr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *saved_env = env;

    env = cpu_single_env;
    {
        uint32_t paddr;
        uint32_t page_size;
        unsigned access;
        int ret = xtensa_get_physical_addr(env, vaddr, is_write, mmu_idx,
                &paddr, &page_size, &access);

        qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n", __func__,
                vaddr, is_write, mmu_idx, paddr, ret);

        if (ret == 0) {
            /* Translation succeeded: map the page for future accesses. */
            tlb_set_page(env,
                    vaddr & TARGET_PAGE_MASK,
                    paddr & TARGET_PAGE_MASK,
                    access, mmu_idx, page_size);
        } else {
            /* ret is the Xtensa exception cause; restore the guest PC
             * from retaddr before raising it. */
            do_restore_state(retaddr);
            HELPER(exception_cause_vaddr)(env->pc, ret, vaddr);
        }
    }
    env = saved_env;
}
100 |
|
101 |
/* Leave the CPU loop with the given exception/vector number. */
void HELPER(exception)(uint32_t excp)
{
    env->exception_index = excp;
    cpu_loop_exit(env);
}

/*
 * Raise a general exception with the given cause code.
 * Selects the double/user/kernel vector from PS.EXCM and PS.UM,
 * records the return PC in DEPC or EPC1, sets EXCCAUSE and PS.EXCM,
 * then exits via HELPER(exception).  Does not return.
 */
void HELPER(exception_cause)(uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        /* Exception while already handling one: double exception.
         * PC goes to DEPC when the core has one, else to EPC1. */
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(vector);
}

/* As exception_cause, additionally latching the faulting virtual
 * address into EXCVADDR.  Does not return. */
void HELPER(exception_cause_vaddr)(uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(pc, cause);
}
135 |
|
136 |
/* NSA: normalization shift amount for a signed value — the number of
 * redundant sign bits minus one; 31 for 0 and -1. */
uint32_t HELPER(nsa)(uint32_t v)
{
    uint32_t x = (v & 0x80000000) ? ~v : v;

    return x == 0 ? 31 : clz32(x) - 1;
}
143 |
|
144 |
/* NSAU: normalization shift amount, unsigned — count of leading
 * zeros; 32 when the value is zero. */
uint32_t HELPER(nsau)(uint32_t v)
{
    if (v == 0) {
        return 32;
    }
    return clz32(v);
}
148 |
|
149 |
/* Copy n registers from the circular physical register file (starting
 * at phys) into the visible window (starting at window), wrapping at
 * nareg if the source range crosses the end of the file. */
static void copy_window_from_phys(CPUState *env,
        uint32_t window, uint32_t phys, uint32_t n)
{
    uint32_t first = n;

    assert(phys < env->config->nareg);
    if (phys + n > env->config->nareg) {
        first = env->config->nareg - phys;
    }
    memcpy(env->regs + window, env->phys_regs + phys,
            first * sizeof(uint32_t));
    if (first < n) {
        /* Wrapped: the remainder starts at physical register 0. */
        memcpy(env->regs + window + first, env->phys_regs,
                (n - first) * sizeof(uint32_t));
    }
}
164 |
|
165 |
/* Inverse of copy_window_from_phys: write n registers from the visible
 * window back into the circular physical register file, wrapping at
 * nareg if the destination range crosses the end of the file. */
static void copy_phys_from_window(CPUState *env,
        uint32_t phys, uint32_t window, uint32_t n)
{
    uint32_t first = n;

    assert(phys < env->config->nareg);
    if (phys + n > env->config->nareg) {
        first = env->config->nareg - phys;
    }
    memcpy(env->phys_regs + phys, env->regs + window,
            first * sizeof(uint32_t));
    if (first < n) {
        /* Wrapped: the remainder lands at physical register 0. */
        memcpy(env->phys_regs, env->regs + window + first,
                (n - first) * sizeof(uint32_t));
    }
}
180 |
|
181 |
|
182 |
static inline unsigned windowbase_bound(unsigned a, const CPUState *env) |
183 |
{ |
184 |
return a & (env->config->nareg / 4 - 1); |
185 |
} |
186 |
|
187 |
static inline unsigned windowstart_bit(unsigned a, const CPUState *env) |
188 |
{ |
189 |
return 1 << windowbase_bound(a, env); |
190 |
} |
191 |
|
192 |
/* Load the 16 visible AR registers from the physical register file at
 * the current WINDOW_BASE. */
void xtensa_sync_window_from_phys(CPUState *env)
{
    copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
}

/* Write the 16 visible AR registers back to the physical register file
 * at the current WINDOW_BASE. */
void xtensa_sync_phys_from_window(CPUState *env)
{
    copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
}

/* Move WINDOW_BASE to an absolute position: flush the current window
 * to the physical file first, then reload the window from the new
 * base.  Order matters — flushing after the base change would corrupt
 * the physical registers. */
static void rotate_window_abs(uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}

/* Rotate the register window by delta slots (signed deltas arrive as
 * wrapped unsigned values; windowbase_bound masks the result). */
static void rotate_window(uint32_t delta)
{
    rotate_window_abs(env->sregs[WINDOW_BASE] + delta);
}

/* WSR.WINDOWBASE */
void HELPER(wsr_windowbase)(uint32_t v)
{
    rotate_window_abs(v);
}
218 |
|
219 |
/*
 * ENTRY instruction: allocate a new register window frame.
 * s: window-relative register holding the stack pointer (must be 0..3);
 * imm: stack frame size in units of 8 bytes.
 * Illegal when s > 3 or when PS.WOE is clear / PS.EXCM is set.
 */
void HELPER(entry)(uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
    if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log("Illegal entry instruction(pc = %08x), PS = %08x\n",
                pc, env->sregs[PS]);
        HELPER(exception_cause)(pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        /* Copy the decremented SP into the callee's frame (callinc*4 + s)
         * before rotating, then mark the new frame in WINDOW_START. */
        env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3);
        rotate_window(callinc);
        env->sregs[WINDOW_START] |=
            windowstart_bit(env->sregs[WINDOW_BASE], env);
    }
}
233 |
|
234 |
/*
 * Window overflow check: w is the bitmap-derived distance of registers
 * the current instruction touches.  If a live caller frame (a set
 * WINDOW_START bit within w slots above windowbase) would be clobbered,
 * rotate to it and raise the matching WindowOverflow{4,8,12} exception.
 * No-op when windowed exceptions are disabled (PS.WOE clear or PS.EXCM
 * set).
 */
void HELPER(window_check)(uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t m, n;

    if ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) {
        return;
    }

    /* Find the nearest live frame above the current one; if none is
     * within reach (n > w) no overflow is needed. */
    for (n = 1; ; ++n) {
        if (n > w) {
            return;
        }
        if (windowstart & windowstart_bit(windowbase + n, env)) {
            break;
        }
    }

    m = windowbase_bound(windowbase + n, env);
    rotate_window(n);
    /* Save the old window base in PS.OWB and enter exception mode. */
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    /* The overflow size is the call increment of the frame above m:
     * the distance to the next set WINDOW_START bit (1, 2, or 3+). */
    if (windowstart & windowstart_bit(m + 1, env)) {
        HELPER(exception)(EXC_WINDOW_OVERFLOW4);
    } else if (windowstart & windowstart_bit(m + 2, env)) {
        HELPER(exception)(EXC_WINDOW_OVERFLOW8);
    } else {
        HELPER(exception)(EXC_WINDOW_OVERFLOW12);
    }
}
267 |
|
268 |
/*
 * RETW instruction: return through the register window.
 * n (bits 31:30 of a0) is the caller's call increment; m is the
 * distance to the nearest live frame below, used only to validate n.
 * Returns the reconstructed return PC (top 2 bits from the current pc,
 * low 30 bits from a0), or 0 when an exception is raised instead.
 */
uint32_t HELPER(retw)(uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t ret_pc = 0;

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    /* Illegal if no rotation encoded (n == 0), the live frame below
     * does not match n, or windowed exceptions are disabled. */
    if (n == 0 || (m != 0 && m != n) ||
            ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log("Illegal retw instruction(pc = %08x), "
                "PS = %08x, m = %d, n = %d\n",
                pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        int owb = windowbase;

        ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

        rotate_window(-n);
        if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
            /* Caller frame is live: retire the returning frame. */
            env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
        } else {
            /* window underflow */
            env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
                (windowbase << PS_OWB_SHIFT) | PS_EXCM;
            env->sregs[EPC1] = env->pc = pc;

            if (n == 1) {
                HELPER(exception)(EXC_WINDOW_UNDERFLOW4);
            } else if (n == 2) {
                HELPER(exception)(EXC_WINDOW_UNDERFLOW8);
            } else if (n == 3) {
                HELPER(exception)(EXC_WINDOW_UNDERFLOW12);
            }
        }
    }
    return ret_pc;
}
315 |
|
316 |
/* ROTW instruction: rotate the window by imm4 slots. */
void HELPER(rotw)(uint32_t imm4)
{
    rotate_window(imm4);
}

/* Restore WINDOW_BASE from the old-window-base field saved in PS.OWB
 * (used by window exception handlers via RFWO/RFWU). */
void HELPER(restore_owb)(void)
{
    rotate_window_abs((env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}
325 |
|
326 |
/* MOVSP instruction: raise an Alloca exception when none of the three
 * possible caller frames (WINDOW_BASE - 1..3) is live in WINDOW_START,
 * so the alloca handler can spill/reload the caller's frame. */
void HELPER(movsp)(uint32_t pc)
{
    uint32_t caller_frames =
        windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
        windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
        windowstart_bit(env->sregs[WINDOW_BASE] - 1, env);

    if ((env->sregs[WINDOW_START] & caller_frames) == 0) {
        HELPER(exception_cause)(pc, ALLOCA_CAUSE);
    }
}
335 |
|
336 |
/*
 * WSR.LBEG: the translated loop-end instruction (the one ending at
 * LEND) encodes the loop-back target, so changing LBEG requires
 * retranslating it — invalidate the TB covering the byte before LEND.
 */
void HELPER(wsr_lbeg)(uint32_t v)
{
    if (env->sregs[LBEG] != v) {
        tb_invalidate_phys_page_range(
                env->sregs[LEND] - 1, env->sregs[LEND], 0);
        env->sregs[LBEG] = v;
    }
}
344 |
|
345 |
/*
 * WSR.LEND: both the old and the new loop-end locations carry
 * loop-back semantics in their translations, so invalidate the TB at
 * the old LEND before the update and at the new LEND after it.
 */
void HELPER(wsr_lend)(uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_phys_page_range(
                env->sregs[LEND] - 1, env->sregs[LEND], 0);
        env->sregs[LEND] = v;
        tb_invalidate_phys_page_range(
                env->sregs[LEND] - 1, env->sregs[LEND], 0);
    }
}
355 |
|
356 |
/* Debug helper: dump the CPU state to stderr. */
void HELPER(dump_state)(void)
{
    cpu_dump_state(env, stderr, fprintf, 0);
}
360 |
|
361 |
/*
 * WAITI instruction: set PS.INTLEVEL and halt until an interrupt at a
 * higher level arrives.  If the core has CCOMPARE timers, arm the
 * ccompare qemu timer for the nearest upcoming compare value so the
 * halt can be broken by a timer interrupt.
 */
void HELPER(waiti)(uint32_t pc, uint32_t intlevel)
{
    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);
    check_interrupts(env);
    if (env->pending_irq_level) {
        /* An interrupt is already pending: don't halt at all. */
        cpu_loop_exit(env);
        return;
    }

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) {
        int i;
        /* Start from the farthest possible wake point (CCOUNT - 1,
         * i.e. a full 2^32 cycle wrap) and pull it in to the nearest
         * CCOMPARE; the subtractions are wrap-around distances. */
        uint32_t wake_ccount = env->sregs[CCOUNT] - 1;

        for (i = 0; i < env->config->nccompare; ++i) {
            if (env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] <
                    wake_ccount - env->sregs[CCOUNT]) {
                wake_ccount = env->sregs[CCOMPARE + i];
            }
        }
        env->wake_ccount = wake_ccount;
        /* Convert the cycle distance to ns using the core clock. */
        qemu_mod_timer(env->ccompare_timer, qemu_get_clock_ns(vm_clock) +
                muldiv64(wake_ccount - env->sregs[CCOUNT],
                    1000000, env->config->clock_freq_khz));
    }
    env->halt_clock = qemu_get_clock_ns(vm_clock);
    env->halted = 1;
    HELPER(exception)(EXCP_HLT);
}
391 |
|
392 |
/* Assert or deassert timer interrupt `id` (forwarded to the board
 * timer code). */
void HELPER(timer_irq)(uint32_t id, uint32_t active)
{
    xtensa_timer_irq(env, id, active);
}

/* Advance the cycle counter (CCOUNT) by d cycles. */
void HELPER(advance_ccount)(uint32_t d)
{
    xtensa_advance_ccount(env, d);
}

/* Re-evaluate pending interrupts after an sreg change (e.g. PS or
 * INTENABLE writes). */
void HELPER(check_interrupts)(CPUState *env)
{
    check_interrupts(env);
}
406 |
|
407 |
/* WSR.RASID: ASID for ring 0 is fixed at 1 (low byte forced to 0x1).
 * Any change to the ring ASIDs invalidates cached translations, so
 * flush the whole QEMU TLB. */
void HELPER(wsr_rasid)(uint32_t v)
{
    uint32_t new_rasid = (v & 0xffffff00) | 0x1;

    if (new_rasid == env->sregs[RASID]) {
        return;
    }
    env->sregs[RASID] = new_rasid;
    tlb_flush(env, 1);
}
415 |
|
416 |
/* Page-size selector for variable-page TLB ways, read from the
 * relevant DTLBCFG/ITLBCFG field: way 4 uses a 2-bit field at bit 16,
 * ways 5 and 6 a 1-bit field at bits 20 and 24; other ways are fixed
 * size and report 0. */
static uint32_t get_page_size(const CPUState *env, bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    if (way == 4) {
        return (tlbcfg >> 16) & 0x3;
    }
    if (way == 5) {
        return (tlbcfg >> 20) & 0x1;
    }
    if (way == 6) {
        return (tlbcfg >> 24) & 0x1;
    }
    return 0;
}
434 |
|
435 |
/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        /* Ways 5 and 6 only have configurable page size when the
         * variable-way-5/6 feature is present. */
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            /* 1M base page, scaled 4x per page-size step. */
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            /* Fixed 4K pages for the refill ways and way 7+. */
            return 0xfffff000;
        }
    } else {
        /* Region protection: fixed 512M regions. */
        return REGION_PAGE_MASK;
    }
}
470 |
|
471 |
/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        /* Refill ways: index width depends on the number of refill
         * entries (32 -> 8 entries/way, else 4 entries/way). */
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        /* Way 4 has a 2-bit entry index below the VPN. */
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            /* 2-bit index for way 5, 3-bit index for way 6. */
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}
499 |
|
500 |
/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    /* The ITLB only has ways 0..7. */
    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        /* Refill ways: entry index comes from the VA bits just above
         * the 4K page offset; width depends on entries/way. */
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                /* Index position scales with the configured page size
                 * (2 VA bits per size step above bit 20). */
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}
554 |
|
555 |
/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        /* Way index in the low bits: 4 bits for the DTLB (10 ways),
         * 3 bits for the ITLB (8 ways). */
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        /* Region protection: 8 fixed 512M regions, selected by the
         * top 3 VA bits; there is a single "way". */
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}
571 |
|
572 |
static xtensa_tlb_entry *get_tlb_entry(uint32_t v, bool dtlb, uint32_t *pwi) |
573 |
{ |
574 |
uint32_t vpn; |
575 |
uint32_t wi; |
576 |
uint32_t ei; |
577 |
|
578 |
split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei); |
579 |
if (pwi) {
|
580 |
*pwi = wi; |
581 |
} |
582 |
return xtensa_tlb_get_entry(env, dtlb, wi, ei);
|
583 |
} |
584 |
|
585 |
/* RITLB0/RDTLB0: read the VPN + ASID word of a TLB entry.
 * Without an MMU (region protection) the "entry" is just the region
 * address. */
uint32_t HELPER(rtlb0)(uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}
595 |
|
596 |
/* RITLB1/RDTLB1: read the PPN + attributes word of a TLB entry. */
uint32_t HELPER(rtlb1)(uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

/* IITLB/IDTLB: invalidate a TLB entry.  Only variable entries with a
 * nonzero ASID are live; invalidation clears the ASID and drops the
 * cached QEMU translation for the page.  No-op without an MMU. */
void HELPER(itlb)(uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(env, entry->vaddr);
            entry->asid = 0;
        }
    }
}
613 |
|
614 |
/* PITLB/PDTLB: probe the TLB for a virtual address.
 * On a single hit visible from the current ring, return the entry
 * specifier (VPN | way | hit flag: 0x10 for DTLB, 0x8 for ITLB).
 * A multi-hit probe raises the corresponding exception; any other
 * miss returns 0.  Without an MMU the probe always "hits" region v. */
uint32_t HELPER(ptlb)(uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}
639 |
|
640 |
/*
 * Write a TLB entry (way wi, index ei) with virtual page vpn and raw
 * PTE word pte.  With an MMU only variable entries may be written;
 * the old mapping's cached translation is flushed first.  The entry's
 * ASID is picked from the RASID byte selected by the PTE ring field.
 * Without an MMU only the attributes (and, with region translation,
 * the target region) are updated.
 */
void xtensa_tlb_set_entry(CPUState *env, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                /* Drop the stale cached translation for the old VPN. */
                tlb_flush_page(env, entry->vaddr);
            }
            entry->vaddr = vpn;
            entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
            /* (pte >> 1) & 0x18 selects the 8-bit RASID lane for the
             * ring encoded in PTE bits 5:4. */
            entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
            entry->attr = pte & 0xf;
        } else {
            qemu_log("%s %d, %d, %d trying to set immutable entry\n",
                    __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(env, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                    XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}
667 |
|
668 |
/* WITLB/WDTLB: decode the entry specifier v into way/index/VPN and
 * install PTE word p there. */
void HELPER(wtlb)(uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn, wi, ei;

    split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}