/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qemu/bitops.h"
#include "sysemu/kvm.h"
#include <assert.h>

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

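/* Listener callbacks can be walked in either direction over the global
 * listener list: Forward visits listeners from lowest to highest priority,
 * Reverse from highest to lowest.  Callers pick the direction so that paired
 * "add" and "del" style notifications reach listeners in opposite orders.
 */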
enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = int128_get64((fr)->addr.size),                          \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
    }))

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

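/* Total ordering on ioeventfds: lexicographic by start address, size,
 * match_data, data and notifier pointer.  Used to keep the per-region and
 * per-AddressSpace ioeventfd arrays sorted so they can be diffed cheaply.
 */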
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2) |
274 |
{ |
275 |
return int128_eq(addrrange_end(r1->addr), r2->addr.start)
|
276 |
&& r1->mr == r2->mr |
277 |
&& int128_eq(int128_add(int128_make64(r1->offset_in_region), |
278 |
r1->addr.size), |
279 |
int128_make64(r2->offset_in_region)) |
280 |
&& r1->dirty_log_mask == r2->dirty_log_mask |
281 |
&& r1->romd_mode == r2->romd_mode |
282 |
&& r1->readonly == r2->readonly; |
283 |
} |
284 |
|
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_oldmmio_read_accessor(void *opaque,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    *value |= (tmp & mask) << shift;
}

static void memory_region_read_accessor(void *opaque,
                                        hwaddr addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_oldmmio_write_accessor(void *opaque,
                                                 hwaddr addr,
                                                 uint64_t *value,
                                                 unsigned size,
                                                 unsigned shift,
                                                 uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
}

static void memory_region_write_accessor(void *opaque,
                                         hwaddr addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}

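/* Split an access of @size bytes into pieces the device's accessor can handle
 * ([access_size_min, access_size_max] bytes), calling @access once per piece
 * with the shift and mask needed to assemble the result in *value.  The byte
 * ordering of the pieces follows the target endianness.
 */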
static void access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     hwaddr addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
#ifdef TARGET_WORDS_BIGENDIAN
        access(opaque, addr + i, value, access_size,
               (size - access_size - i) * 8, access_mask);
#else
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
#endif
    }
}

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegionIORange *mrio
        = container_of(iorange, MemoryRegionIORange, iorange);
    MemoryRegion *mr = mrio->mr;

    offset += mrio->offset;
    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset - mrio->offset,
                                                    width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset - mrio->offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset) |
                    (mrp->read(mr->opaque, offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegionIORange *mrio
        = container_of(iorange, MemoryRegionIORange, iorange);
    MemoryRegion *mr = mrio->mr;

    offset += mrio->offset;
    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset - mrio->offset,
                                                    width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset, data);
        } else if (width == 2) {
            mrp = find_portio(mr, offset - mrio->offset, 1, true);
            assert(mrp);
            mrp->write(mr->opaque, offset, data & 0xff);
            mrp->write(mr->opaque, offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static void memory_region_iorange_destructor(IORange *iorange)
{
    g_free(container_of(iorange, MemoryRegionIORange, iorange));
}

const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
    .destructor = memory_region_iorange_destructor,
};

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->parent) {
        mr = mr->parent;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    abort();
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.romd_mode = mr->romd_mode;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.romd_mode = mr->romd_mode;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    if (mr) {
        render_memory_region(&view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = int128_get64(fd->addr.size),
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = int128_get64(fd->addr.size),
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

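/* Walk the old and new flat views in lockstep (both are sorted by address).
 * The first pass (adding == false) emits region_del for ranges that
 * disappeared or changed attributes; the second pass (adding == true) emits
 * region_add for new ranges and region_nop/log_start/log_stop for ranges that
 * stayed in place.
 */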
static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}


static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = *as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    *as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

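/* Topology changes are batched: nothing is rebuilt or reported to listeners
 * until the outermost memory_region_transaction_commit().  Illustrative usage
 * (a sketch, not taken from any particular caller; "bank0"/"bank1" are
 * hypothetical regions):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, 0x4000);
 *     memory_region_transaction_commit();
 *
 * Listeners then observe a single begin/region_*\/commit sequence covering
 * both changes.
 */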
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth && memory_region_update_pending) {
        memory_region_update_pending = false;
        MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            address_space_update_topology(as);
        }

        MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

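/* Base initialization shared by every region type.  Used directly for pure
 * container regions; the memory_region_init_*() variants further down build
 * MMIO, RAM, alias and ROM-device regions on top of it.
 */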
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = &unassigned_mem_ops;
    mr->opaque = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->romd_mode = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
    mr->flush_coalesced_mmio = false;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             hwaddr addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        return unassigned_mem_read(mr, addr, size);
    }

    if (mr->ops->read) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_read_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_read_accessor, mr);
    }

    return data;
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        default:
            abort();
        }
    }
}

static uint64_t memory_region_dispatch_read(MemoryRegion *mr,
                                            hwaddr addr,
                                            unsigned size)
{
    uint64_t ret;

    ret = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, &ret, size);
    return ret;
}

static void memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return;
    }

    adjust_endianness(mr, &data, size);

    if (mr->ops->write) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_write_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_write_accessor, mr);
    }
}

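/* The init_io/init_ram/init_ram_ptr/init_alias/init_rom_device variants below
 * initialize the concrete region kinds (MMIO, RAM, RAM backed by an existing
 * pointer, alias, ROM device).  A board model typically creates a region and
 * then maps it into a container.  Illustrative sketch only (ram_size is a
 * hypothetical variable; get_system_memory() is declared in
 * exec/address-spaces.h):
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, "ram", ram_size);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */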
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->ram_addr = ~(ram_addr_t)0;
}

void memory_region_init_ram(MemoryRegion *mr,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_reservation(MemoryRegion *mr,
                                    const char *name,
                                    uint64_t size)
{
    memory_region_init_io(mr, &unassigned_mem_ops, mr, name, size);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    assert(memory_region_transaction_depth == 0);
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size,
                                         1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->terminates);
    return cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, -1);
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    bool ret;
    assert(mr->terminates);
    ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size,
                                        1 << client);
    if (ret) {
        cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                        mr->ram_addr + addr + size,
                                        1 << client);
    }
    return ret;
}


void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FOR_EACH_FLAT_RANGE(fr, as->current_map) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    FOR_EACH_FLAT_RANGE(fr, as->current_map) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = int128_get64(fr->addr.size),
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

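/* Register an ioeventfd: a write of @size bytes at @addr (optionally only when
 * the value written matches @data) signals @e, so that an accelerator backend
 * (e.g. KVM) can complete the access without taking the regular MMIO exit
 * path.  The array is kept sorted with memory_region_ioeventfd_before();
 * listeners pick the change up via eventfd_add/eventfd_del when the
 * surrounding transaction commits.
 */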
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    memory_region_transaction_begin();

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_ge(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}


void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    MemoryRegion *parent = mr->parent;
    unsigned priority = mr->priority;
    bool may_overlap = mr->may_overlap;

    if (addr == mr->addr || !parent) {
        mr->addr = addr;
        return;
    }

    memory_region_transaction_begin();
    memory_region_del_subregion(parent, mr);
    if (may_overlap) {
        memory_region_add_subregion_overlap(parent, addr, mr, priority);
    } else {
        memory_region_add_subregion(parent, addr, mr);
    }
    memory_region_transaction_commit();
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *address_space_lookup(AddressSpace *as, AddrRange addr)
{
    return bsearch(&addr, as->current_map->ranges, as->current_map->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

1503 |
hwaddr addr, uint64_t size) |
1504 |
{ |
1505 |
MemoryRegionSection ret = { .mr = NULL, .size = 0 }; |
1506 |
MemoryRegion *root; |
1507 |
AddressSpace *as; |
1508 |
AddrRange range; |
1509 |
FlatRange *fr; |
1510 |
|
1511 |
addr += mr->addr; |
1512 |
for (root = mr; root->parent; ) {
|
1513 |
root = root->parent; |
1514 |
addr += root->addr; |
1515 |
} |
1516 |
|
1517 |
as = memory_region_to_address_space(root); |
1518 |
range = addrrange_make(int128_make64(addr), int128_make64(size)); |
1519 |
fr = address_space_lookup(as, range); |
1520 |
if (!fr) {
|
1521 |
return ret;
|
1522 |
} |
1523 |
|
1524 |
while (fr > as->current_map->ranges
|
1525 |
&& addrrange_intersects(fr[-1].addr, range)) {
|
1526 |
--fr; |
1527 |
} |
1528 |
|
1529 |
ret.mr = fr->mr; |
1530 |
ret.address_space = as; |
1531 |
range = addrrange_intersection(range, fr->addr); |
1532 |
ret.offset_within_region = fr->offset_in_region; |
1533 |
ret.offset_within_region += int128_get64(int128_sub(range.start, |
1534 |
fr->addr.start)); |
1535 |
ret.size = int128_get64(range.size); |
1536 |
ret.offset_within_address_space = int128_get64(range.start); |
1537 |
ret.readonly = fr->readonly; |
1538 |
return ret;
|
1539 |
} |
1540 |
|
1541 |
void address_space_sync_dirty_bitmap(AddressSpace *as)
|
1542 |
{ |
1543 |
FlatRange *fr; |
1544 |
|
1545 |
FOR_EACH_FLAT_RANGE(fr, as->current_map) { |
1546 |
MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync); |
1547 |
} |
1548 |
} |
1549 |
|
1550 |
void memory_global_dirty_log_start(void) |
1551 |
{ |
1552 |
global_dirty_log = true;
|
1553 |
MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); |
1554 |
} |
1555 |
|
1556 |
void memory_global_dirty_log_stop(void) |
1557 |
{ |
1558 |
global_dirty_log = false;
|
1559 |
MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); |
1560 |
} |
1561 |
|
1562 |
static void listener_add_address_space(MemoryListener *listener, |
1563 |
AddressSpace *as) |
1564 |
{ |
1565 |
FlatRange *fr; |
1566 |
|
1567 |
if (listener->address_space_filter
|
1568 |
&& listener->address_space_filter != as) { |
1569 |
return;
|
1570 |
} |
1571 |
|
1572 |
if (global_dirty_log) {
|
1573 |
if (listener->log_global_start) {
|
1574 |
listener->log_global_start(listener); |
1575 |
} |
1576 |
} |
1577 |
|
1578 |
FOR_EACH_FLAT_RANGE(fr, as->current_map) { |
1579 |
MemoryRegionSection section = { |
1580 |
.mr = fr->mr, |
1581 |
.address_space = as, |
1582 |
.offset_within_region = fr->offset_in_region, |
1583 |
.size = int128_get64(fr->addr.size), |
1584 |
.offset_within_address_space = int128_get64(fr->addr.start), |
1585 |
.readonly = fr->readonly, |
1586 |
}; |
1587 |
if (listener->region_add) {
|
1588 |
listener->region_add(listener, §ion); |
1589 |
} |
1590 |
} |
1591 |
} |
1592 |
|
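/* Listeners are kept sorted by ascending priority.  A newly registered
 * listener is immediately brought up to date: it receives region_add (and
 * log_global_start, if global dirty logging is active) for everything already
 * present in the address spaces it cares about.
 */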
void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
{
    MemoryListener *other = NULL;
    AddressSpace *as;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        listener_add_address_space(listener, as);
    }
}

void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}

void address_space_init(AddressSpace *as, MemoryRegion *root)
|
1623 |
{ |
1624 |
memory_region_transaction_begin(); |
1625 |
as->root = root; |
1626 |
as->current_map = g_new(FlatView, 1);
|
1627 |
flatview_init(as->current_map); |
1628 |
as->ioeventfd_nb = 0;
|
1629 |
as->ioeventfds = NULL;
|
1630 |
QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); |
1631 |
as->name = NULL;
|
1632 |
address_space_init_dispatch(as); |
1633 |
memory_region_update_pending |= root->enabled; |
1634 |
memory_region_transaction_commit(); |
1635 |
} |
1636 |
|
1637 |
void address_space_destroy(AddressSpace *as)
|
1638 |
{ |
1639 |
/* Flush out anything from MemoryListeners listening in on this */
|
1640 |
memory_region_transaction_begin(); |
1641 |
as->root = NULL;
|
1642 |
memory_region_transaction_commit(); |
1643 |
QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); |
1644 |
address_space_destroy_dispatch(as); |
1645 |
flatview_destroy(as->current_map); |
1646 |
g_free(as->current_map); |
1647 |
g_free(as->ioeventfds); |
1648 |
} |
1649 |
|
1650 |
uint64_t io_mem_read(MemoryRegion *mr, hwaddr addr, unsigned size)
|
1651 |
{ |
1652 |
return memory_region_dispatch_read(mr, addr, size);
|
1653 |
} |
1654 |
|
1655 |
void io_mem_write(MemoryRegion *mr, hwaddr addr,
|
1656 |
uint64_t val, unsigned size)
|
1657 |
{ |
1658 |
memory_region_dispatch_write(mr, addr, val, size); |
1659 |
} |
1660 |
|
1661 |
typedef struct MemoryRegionList MemoryRegionList; |
1662 |
|
1663 |
struct MemoryRegionList {
|
1664 |
const MemoryRegion *mr;
|
1665 |
bool printed;
|
1666 |
QTAILQ_ENTRY(MemoryRegionList) queue; |
1667 |
}; |
1668 |
|
1669 |
typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
|
1670 |
|
1671 |
static void mtree_print_mr(fprintf_function mon_printf, void *f, |
1672 |
const MemoryRegion *mr, unsigned int level, |
1673 |
hwaddr base, |
1674 |
MemoryRegionListHead *alias_print_queue) |
1675 |
{ |
1676 |
MemoryRegionList *new_ml, *ml, *next_ml; |
1677 |
MemoryRegionListHead submr_print_queue; |
1678 |
const MemoryRegion *submr;
|
1679 |
unsigned int i; |
1680 |
|
1681 |
if (!mr || !mr->enabled) {
|
1682 |
return;
|
1683 |
} |
1684 |
|
1685 |
for (i = 0; i < level; i++) { |
1686 |
mon_printf(f, " ");
|
1687 |
} |
1688 |
|
1689 |
if (mr->alias) {
|
1690 |
MemoryRegionList *ml; |
1691 |
bool found = false; |
1692 |
|
1693 |
/* check if the alias is already in the queue */
|
1694 |
QTAILQ_FOREACH(ml, alias_print_queue, queue) { |
1695 |
if (ml->mr == mr->alias && !ml->printed) {
|
1696 |
found = true;
|
1697 |
} |
1698 |
} |
1699 |
|
1700 |
if (!found) {
|
1701 |
ml = g_new(MemoryRegionList, 1);
|
1702 |
ml->mr = mr->alias; |
1703 |
ml->printed = false;
|
1704 |
QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue); |
1705 |
} |
1706 |
mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
|
1707 |
" (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
|
1708 |
"-" TARGET_FMT_plx "\n", |
1709 |
base + mr->addr, |
1710 |
base + mr->addr |
1711 |
+ (hwaddr)int128_get64(mr->size) - 1,
|
1712 |
mr->priority, |
1713 |
mr->romd_mode ? 'R' : '-', |
1714 |
!mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
|
1715 |
: '-',
|
1716 |
mr->name, |
1717 |
mr->alias->name, |
1718 |
mr->alias_offset, |
1719 |
mr->alias_offset |
1720 |
+ (hwaddr)int128_get64(mr->size) - 1);
|
1721 |
} else {
|
1722 |
mon_printf(f, |
1723 |
TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n", |
1724 |
base + mr->addr, |
1725 |
base + mr->addr |
1726 |
+ (hwaddr)int128_get64(mr->size) - 1,
|
1727 |
mr->priority, |
1728 |
mr->romd_mode ? 'R' : '-', |
1729 |
!mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
|
1730 |
: '-',
|
1731 |
mr->name); |
1732 |
} |
1733 |
|
1734 |
QTAILQ_INIT(&submr_print_queue); |
1735 |
|
1736 |
QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { |
1737 |
new_ml = g_new(MemoryRegionList, 1);
|
1738 |
new_ml->mr = submr; |
1739 |
QTAILQ_FOREACH(ml, &submr_print_queue, queue) { |
1740 |
if (new_ml->mr->addr < ml->mr->addr ||
|
1741 |
(new_ml->mr->addr == ml->mr->addr && |
1742 |
new_ml->mr->priority > ml->mr->priority)) { |
1743 |
QTAILQ_INSERT_BEFORE(ml, new_ml, queue); |
1744 |
new_ml = NULL;
|
1745 |
break;
|
1746 |
} |
1747 |
} |
1748 |
if (new_ml) {
|
1749 |
QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue); |
1750 |
} |
1751 |
} |
1752 |
|
1753 |
QTAILQ_FOREACH(ml, &submr_print_queue, queue) { |
1754 |
mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
|
1755 |
alias_print_queue); |
1756 |
} |
1757 |
|
1758 |
QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) { |
1759 |
g_free(ml); |
1760 |
} |
1761 |
} |
1762 |
|
1763 |
void mtree_info(fprintf_function mon_printf, void *f) |
1764 |
{ |
1765 |
MemoryRegionListHead ml_head; |
1766 |
MemoryRegionList *ml, *ml2; |
1767 |
AddressSpace *as; |
1768 |
|
1769 |
QTAILQ_INIT(&ml_head); |
1770 |
|
1771 |
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { |
1772 |
if (!as->name) {
|
1773 |
continue;
|
1774 |
} |
1775 |
mon_printf(f, "%s\n", as->name);
|
1776 |
mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head); |
1777 |
} |
1778 |
|
1779 |
mon_printf(f, "aliases\n");
|
1780 |
/* print aliased regions */
|
1781 |
QTAILQ_FOREACH(ml, &ml_head, queue) { |
1782 |
if (!ml->printed) {
|
1783 |
mon_printf(f, "%s\n", ml->mr->name);
|
1784 |
mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head); |
1785 |
} |
1786 |
} |
1787 |
|
1788 |
QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) { |
1789 |
g_free(ml); |
1790 |
} |
1791 |
} |