root / exec.c @ 24addbc7
/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

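/* Nodes of the phys map radix tree are allocated out of one array that
 * grows geometrically; the 15-bit ptr fields index into it, with
 * PHYS_MAP_NODE_NIL marking an absent child. */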
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

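/* Descend the tree, allocating interior nodes on demand; any aligned
 * run of 'step' pages is recorded as a leaf at this level, everything
 * smaller or unaligned recurses one level down. */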
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

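/* Resolve a page index to its MemoryRegionSection by walking the tree;
 * a NIL pointer on the way down means the page is unassigned. */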
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

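/* Resolve addr to a terminal MemoryRegion, iterating through any IOMMUs
 * on the way and shrinking *plen to the largest contiguous run the
 * translation permits; accesses the IOMMU forbids resolve to the
 * unassigned region. */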
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

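/* Link the new CPU at the tail of the global first_cpu list, give it
 * the next free cpu_index, and register its common (and, if present,
 * per-class) vmstate for migration. */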
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

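/* A section may start or end in the middle of a page; carve off an
 * unaligned head and tail as subpages and register the page-aligned
 * middle as a multipage range. */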
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

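/* hugetlbfs reports its huge page size as the filesystem block size;
 * returns 0 (and prints a diagnostic) if statfs fails. */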
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

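/* Find a gap in the ram map big enough for 'size' bytes, preferring the
 * smallest gap that fits (best fit over the unsorted block list). */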
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

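/* Create and register a RAMBlock: backed by a caller-supplied pointer
 * if host is non-NULL, otherwise by -mem-path, Xen, KVM or anonymous
 * memory; the block is inserted size-sorted and its pages start out
 * fully dirty. */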
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

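/* Discard and re-create the host mapping of a RAM range in place,
 * using the block's original backing (file or anonymous); the previous
 * contents of the range are lost, e.g. when dropping a poisoned page. */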
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

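/* Writes land here while a page's dirty bits are clear: invalidate TBs
 * derived from the page, perform the store, then set the dirty flags;
 * once all flags are set the TLB entry switches back to a direct RAM
 * mapping. */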
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

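/* Subpage reads and writes bounce back into the owning address space at
 * base + addr, so different byte ranges of one page can be backed by
 * different sections. */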
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

1714 |
static void io_mem_init(void) |
1715 |
{ |
1716 |
memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX); |
1717 |
memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
|
1718 |
"unassigned", UINT64_MAX);
|
1719 |
memory_region_init_io(&io_mem_notdirty, ¬dirty_mem_ops, NULL,
|
1720 |
"notdirty", UINT64_MAX);
|
1721 |
memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
|
1722 |
"watch", UINT64_MAX);
|
1723 |
} |
1724 |
|
1725 |
static void mem_begin(MemoryListener *listener) |
1726 |
{ |
1727 |
AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener); |
1728 |
|
1729 |
destroy_all_mappings(d); |
1730 |
d->phys_map.ptr = PHYS_MAP_NODE_NIL; |
1731 |
} |
1732 |
|
1733 |
static void core_begin(MemoryListener *listener) |
1734 |
{ |
1735 |
phys_sections_clear(); |
1736 |
phys_section_unassigned = dummy_section(&io_mem_unassigned); |
1737 |
phys_section_notdirty = dummy_section(&io_mem_notdirty); |
1738 |
phys_section_rom = dummy_section(&io_mem_rom); |
1739 |
phys_section_watch = dummy_section(&io_mem_watch); |
1740 |
} |
1741 |
|
1742 |
static void tcg_commit(MemoryListener *listener) |
1743 |
{ |
1744 |
CPUArchState *env; |
1745 |
|
1746 |
/* since each CPU stores ram addresses in its TLB cache, we must
|
1747 |
reset the modified entries */
|
1748 |
/* XXX: slow ! */
|
1749 |
for(env = first_cpu; env != NULL; env = env->next_cpu) { |
1750 |
tlb_flush(env, 1);
|
1751 |
} |
1752 |
} |
1753 |
|
1754 |
static void core_log_global_start(MemoryListener *listener) |
1755 |
{ |
1756 |
cpu_physical_memory_set_dirty_tracking(1);
|
1757 |
} |
1758 |
|
1759 |
static void core_log_global_stop(MemoryListener *listener) |
1760 |
{ |
1761 |
cpu_physical_memory_set_dirty_tracking(0);
|
1762 |
} |
1763 |
|
1764 |
static void io_region_add(MemoryListener *listener, |
1765 |
MemoryRegionSection *section) |
1766 |
{ |
1767 |
MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
|
1768 |
|
1769 |
mrio->mr = section->mr; |
1770 |
mrio->offset = section->offset_within_region; |
1771 |
iorange_init(&mrio->iorange, &memory_region_iorange_ops, |
1772 |
section->offset_within_address_space, |
1773 |
int128_get64(section->size)); |
1774 |
ioport_register(&mrio->iorange); |
1775 |
} |
1776 |
|
1777 |
static void io_region_del(MemoryListener *listener, |
1778 |
MemoryRegionSection *section) |
1779 |
{ |
1780 |
isa_unassign_ioport(section->offset_within_address_space, |
1781 |
int128_get64(section->size)); |
1782 |
} |
1783 |
|
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

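/* Attach a dispatch structure (the physical page map plus its listener)
 * to a new address space; the listener rebuilds d->phys_map whenever
 * the memory topology changes.
 */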
void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    d->as = as;
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

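/* Create the two root address spaces, wire up the global listeners and
 * the default DMA context.  The "system" region covers RAM and MMIO,
 * while the "io" region models the 64K legacy port space.
 */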
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

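/* True if an access of the given direction can be performed with a
 * plain host memcpy: writable RAM, or RAM/ROMD for reads.
 */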
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

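/* Clamp an MMIO access to the widest size (4, 2 or 1 bytes) that both
 * fits in the remaining length and respects the access alignment,
 * unless the region declares unaligned support.
 */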
static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
{
    if (l >= 4 && (((addr & 3) == 0) || mr->ops->impl.unaligned)) {
        return 4;
    }
    if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
        return 2;
    }
    return 1;
}

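/* Slow-path copy between a guest physical range and a host buffer:
 * RAM sections are memcpy'd directly (with dirty tracking on writes),
 * everything else is split into 1/2/4 byte MMIO transactions.  Returns
 * true if any individual access failed.
 */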
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

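/* address_space_map() bounce-buffer machinery: when a range cannot be
 * mapped directly, a single page-sized bounce buffer is handed out
 * instead, and registered map clients are called back once it is
 * released so they can retry their mapping.
 */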
typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

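/* Check whether a full read or write of the given range would succeed,
 * without actually performing it: direct (RAM/ROMD) sections are always
 * valid, MMIO sections are asked via memory_region_access_valid().
 */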
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr todo = 0;
    hwaddr l, xlat;
    MemoryRegion *mr;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);

        if (!memory_access_is_direct(mr, is_write)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(mr) + xlat;
        } else {
            if (memory_region_get_ram_addr(mr) + xlat != raddr + todo) {
                break;
            }
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

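/* Fixed-size physical memory accessors.  Each ld*_phys/st*_phys helper
 * below translates the address once, performs either a direct RAM
 * access or a single MMIO transaction, and byte-swaps as needed for the
 * explicitly requested (or target-native) endianness.
 */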
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
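/* Returns true if the given physical address is backed by MMIO rather
 * than RAM or a ROM device.
 */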
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
#endif