root / memory.c @ ac1970fb
/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

#include "memory-internal.h"

unsigned memory_region_transaction_depth = 0;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

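/* A listener with a NULL address_space_filter receives events for every
 * address space; otherwise only sections from the filtered space match.
 */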
static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = int128_get64((fr)->addr.size),                          \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
    }))

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

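/* Total order on ioeventfds: by start address, then size, then match_data,
 * data and notifier.  Used to keep ioeventfd arrays sorted.
 */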
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}

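/* Split an access into pieces the device accepts (between access_size_min
 * and access_size_max bytes) and combine the partial results through the
 * accessor callback.
 */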
static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegionIORange *mrio
        = container_of(iorange, MemoryRegionIORange, iorange);
    MemoryRegion *mr = mrio->mr;

    offset += mrio->offset;
    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset - mrio->offset,
                                                    width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset - mrio->offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset) |
                    (mrp->read(mr->opaque, offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegionIORange *mrio
        = container_of(iorange, MemoryRegionIORange, iorange);
    MemoryRegion *mr = mrio->mr;

    offset += mrio->offset;
    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset - mrio->offset,
                                                    width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset, data);
        } else if (width == 2) {
            mrp = find_portio(mr, offset - mrio->offset, 1, true);
            assert(mrp);
            mrp->write(mr->opaque, offset, data & 0xff);
            mrp->write(mr->opaque, offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static void memory_region_iorange_destructor(IORange *iorange)
{
    g_free(container_of(iorange, MemoryRegionIORange, iorange));
}

const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
    .destructor = memory_region_iorange_destructor,
};

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->parent) {
        mr = mr->parent;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    abort();
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        if (int128_eq(base, view->ranges[i].addr.start)) {
            now = int128_min(remain, view->ranges[i].addr.size);
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
    }
    if (int128_nz(remain)) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, int128_zero(),
                         addrrange_make(int128_zero(), int128_2_64()), false);
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = int128_get64(fd->addr.size),
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = int128_get64(fd->addr.size),
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

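/* Rebuild the address space's flat list of ioeventfds from the current
 * memory map and notify listeners of additions and deletions.
 */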
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

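/* Recompute the flat view of @as and notify listeners of the differences:
 * the first pass removes dropped ranges, the second pass adds new ones.
 */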
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = *as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    *as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            address_space_update_topology(as);
        }

        MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->readable = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
    mr->flush_coalesced_mmio = false;
}

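/* Check an access against the constraints in MemoryRegionOps::valid. */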
static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size,
                                       bool is_write)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write)) {
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat zero as compatibility all valid */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             target_phys_addr_t addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

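/* Byte-swap the value if the device endianness differs from the target's. */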
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        default:
            abort();
        }
    }
}

static uint64_t memory_region_dispatch_read(MemoryRegion *mr,
                                            target_phys_addr_t addr,
                                            unsigned size)
{
    uint64_t ret;

    ret = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, &ret, size);
    return ret;
}

static void memory_region_dispatch_write(MemoryRegion *mr,
                                         target_phys_addr_t addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        return; /* FIXME: better signalling */
    }

    adjust_endianness(mr, &data, size);

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = ~(ram_addr_t)0;
}

void memory_region_init_ram(MemoryRegion *mr,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

static uint64_t invalid_read(void *opaque, target_phys_addr_t addr,
                             unsigned size)
{
    MemoryRegion *mr = opaque;

    if (!mr->warning_printed) {
        fprintf(stderr, "Invalid read from memory region %s\n", mr->name);
        mr->warning_printed = true;
    }
    return -1U;
}

static void invalid_write(void *opaque, target_phys_addr_t addr, uint64_t data,
                          unsigned size)
{
    MemoryRegion *mr = opaque;

    if (!mr->warning_printed) {
        fprintf(stderr, "Invalid write to memory region %s\n", mr->name);
        mr->warning_printed = true;
    }
}

static const MemoryRegionOps reservation_ops = {
    .read = invalid_read,
    .write = invalid_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

void memory_region_init_reservation(MemoryRegion *mr,
                                    const char *name,
                                    uint64_t size)
{
    memory_region_init_io(mr, &reservation_ops, mr, name, size);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size,
                                         1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size)
{
    assert(mr->terminates);
    return cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, -1);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FOR_EACH_FLAT_RANGE(fr, as->current_map) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        memory_region_transaction_begin();
        mr->readable = readable;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

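/* Re-register the coalesced MMIO ranges of @mr with the listeners of @as,
 * clipped to the flat ranges the region currently occupies.
 */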
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    FOR_EACH_FLAT_RANGE(fr, as->current_map) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = int128_get64(fr->addr.size),
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    memory_region_transaction_begin();

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_gt(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_transaction_commit();
}


void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_transaction_commit();
}

void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr)
{
    MemoryRegion *parent = mr->parent;
    unsigned priority = mr->priority;
    bool may_overlap = mr->may_overlap;

    if (addr == mr->addr || !parent) {
        mr->addr = addr;
        return;
    }

    memory_region_transaction_begin();
    memory_region_del_subregion(parent, mr);
    if (may_overlap) {
        memory_region_add_subregion_overlap(parent, addr, mr, priority);
    } else {
        memory_region_add_subregion(parent, addr, mr);
    }
    memory_region_transaction_commit();
}

void memory_region_set_alias_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_transaction_commit();
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}

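/* bsearch() comparator: an AddrRange key matches any FlatRange it overlaps. */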
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *address_space_lookup(AddressSpace *as, AddrRange addr)
{
    return bsearch(&addr, as->current_map->ranges, as->current_map->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

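/* Translate an absolute address range into the section of the flattened
 * view that contains its start.
 */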
MemoryRegionSection memory_region_find(MemoryRegion *address_space,
                                       target_phys_addr_t addr, uint64_t size)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    AddrRange range = addrrange_make(int128_make64(addr),
                                     int128_make64(size));
    FlatRange *fr = address_space_lookup(as, range);
    MemoryRegionSection ret = { .mr = NULL, .size = 0 };

    if (!fr) {
        return ret;
    }

    while (fr > as->current_map->ranges
           && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = int128_get64(range.size);
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

void memory_global_sync_dirty_bitmap(MemoryRegion *address_space)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, as->current_map) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

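/* Replay the current state of @as (global dirty logging and all flat
 * ranges) to a newly registered listener.
 */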
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as) {
        return;
    }

    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    FOR_EACH_FLAT_RANGE(fr, as->current_map) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = int128_get64(fr->addr.size),
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
}

void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
{
    MemoryListener *other = NULL;
    AddressSpace *as;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        listener_add_address_space(listener, as);
    }
}

void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}

void address_space_init(AddressSpace *as, MemoryRegion *root)
{
    memory_region_transaction_begin();
    as->root = root;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = NULL;
    memory_region_transaction_commit();
    address_space_init_dispatch(as);
}

uint64_t io_mem_read(MemoryRegion *mr, target_phys_addr_t addr, unsigned size)
{
    return memory_region_dispatch_read(mr, addr, size);
}

void io_mem_write(MemoryRegion *mr, target_phys_addr_t addr,
                  uint64_t val, unsigned size)
{
    memory_region_dispatch_write(mr, addr, val, size);
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

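/* Recursively print a region and its subregions for 'info mtree'; alias
 * targets are queued so they are printed once under "aliases".
 */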
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           target_phys_addr_t base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->readable ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->readable) ? 'W'
                                                                      : '-',
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset
                   + (target_phys_addr_t)int128_get64(mr->size) - 1);
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->readable ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->readable) ? 'W'
                                                                      : '-',
                   mr->name);
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (!as->name) {
            continue;
        }
        mon_printf(f, "%s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head);
    }

    mon_printf(f, "aliases\n");
    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}