root / cputlb.c @ 182735ef
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

static const CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

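/* All-ones is a safe "invalid" sentinel: lookups compare a page-aligned
 * address against the tag masked with TARGET_PAGE_MASK | TLB_INVALID_MASK,
 * and -1 keeps TLB_INVALID_MASK set, so the comparison can never match. */
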
/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
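/* For instance, a target reloading its page-table base (e.g. an x86 CR3
 * write) can pass flush_global == 0 and rely on the blanket flush
 * described above, while a full MMU reconfiguration would pass
 * flush_global == 1. */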
void tlb_flush(CPUArchState *env, int flush_global)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
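    /* Example: assuming TARGET_PAGE_MASK == 0xfffff000, a read-only mapping
     * of page 0x4000 has addr_read == 0x4000 and addr_write == -1.  For
     * addr == 0x4000 the addr_read compare matches and the entry is wiped,
     * while the -1 in addr_write keeps TLB_INVALID_MASK set after masking
     * and so can never produce a false match. */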
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
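    /* Example: assuming TARGET_PAGE_BITS == 12 and CPU_TLB_SIZE == 256,
     * addr 0x12345678 is rounded down to 0x12345000 above and selects
     * index (0x12345000 >> 12) & 0xff == 0x45, so only one slot per MMU
     * mode has to be probed. */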
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the physical page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in the physical page 'ram_addr' are no
   longer tested for self-modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

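/* An entry targets plain, already-dirty RAM iff none of the special flag
 * bits are set in addr_write; writes through such an entry need no dirty
 * tracking and can take the fast path. */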
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

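/* Note: the single unsigned comparison (addr - start) < length above
 * rejects addresses below start (the subtraction wraps to a huge value)
 * as well as those past the end, covering both bounds in one test. */
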
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if (tlb_is_dirty_ram(tlb_entry)) {
        p = (void *)(uintptr_t)((tlb_entry->addr_write & TARGET_PAGE_MASK)
                                + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUState *cpu;
    CPUArchState *env;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        int mmu_idx;

        env = cpu->env_ptr;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr so that writes to
   it are no longer trapped for dirty tracking (the page is now dirty) */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

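/* Example: assuming a 32-bit target_ulong, suppose a 2 MB page is already
 * recorded as tlb_flush_addr == 0x00200000 with tlb_flush_mask ==
 * 0xffe00000, and a second 2 MB page at vaddr 0x00800000 is added.  The
 * two addresses differ in bits 21 and 23, so the loop above widens the
 * mask to 0xff000000 and the tracked region becomes the 16 MB block at
 * 0x00000000; a later tlb_flush_page() anywhere in that block forces a
 * full flush. */
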
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(&address_space_memory, paddr,
                                                &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && !cpu_physical_memory_is_dirty(section->mr->ram_addr + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

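/* The addend stored above turns a TLB hit into a host pointer with a
 * single add: host = guest_vaddr + te->addend.  For example, if guest
 * page 0x1000 were backed by host memory at 0x7f0000002000 (hypothetical
 * addresses), addend would be 0x7f0000001000 and an access to 0x1234
 * would resolve to 0x7f0000002234. */
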
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
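    /* The cpu_ldub_code() probe above is only for its side effect: on a
     * miss it faults the page in and fills the TLB entry; the byte it
     * loads is discarded. */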
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
    if (memory_region_is_unassigned(mr)) {
        CPUState *cpu = ENV_GET_CPU(env1);
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

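/* Instantiate the code-access softmmu load helpers (the _cmmu variants
 * used during translation).  Each SHIFT value selects an access size of
 * 1 << SHIFT bytes: 0 = byte, 1 = word, 2 = long, 3 = quad. */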
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#undef env