/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

unsigned memory_region_transaction_depth = 0;
static bool memory_region_update_pending = false;
static bool global_dirty_log = false;

static QLIST_HEAD(, MemoryListener) memory_listeners
    = QLIST_HEAD_INITIALIZER(memory_listeners);

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};

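/* Total order on ioeventfds: by address, then size, then match_data, then
 * (when data matching is enabled) data value, then fd.
 * memory_region_ioeventfd_equal() below derives equality from this ordering. */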
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

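/* Two flat ranges can merge when they are physically contiguous, map
 * contiguous offsets of the same region, and agree on all attributes. */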
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}

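/* Split an access of @size bytes into pieces the device can handle, clamping
 * the piece size to [access_size_min, access_size_max] (defaulting to 1 and 4),
 * and combine the partial results through @access using shift and mask. */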
static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    MemoryRegionSection section = {
        .mr = fr->mr,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .offset_within_region = fr->offset_in_region,
        .size = int128_get64(fr->addr.size),
    };

    cpu_register_physical_memory_log(&section, fr->readable, fr->readonly);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    MemoryRegionSection section = {
        .mr = &io_mem_unassigned,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .offset_within_region = int128_get64(fr->addr.start),
        .size = int128_get64(fr->addr.size),
    };

    cpu_register_physical_memory_log(&section, true, false);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
}

static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && int128_get64(fd->addr.size) == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
                                    fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
                                    fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

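/* Find the old-style port I/O callback that covers @offset with the given
 * access width and direction; returns NULL when none matches. */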
static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset + mr->offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset + mr->offset) |
                    (mrp->read(mr->opaque, offset + mr->offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset + mr->offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset + mr->offset, data);
        } else if (width == 2) {
            mrp = find_portio(mr, offset, 1, true);
            assert(mrp);
            mrp->write(mr->opaque, offset + mr->offset, data & 0xff);
            mrp->write(mr->opaque, offset + mr->offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset + mr->offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 int128_get64(fr->addr.start), int128_get64(fr->addr.size));
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(int128_get64(fr->addr.start),
                        int128_get64(fr->addr.size));
}

static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && int128_get64(fd->addr.size) == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
                                   fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
                                   fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

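/* Walk up to the root of the hierarchy containing @mr and map it to one of
 * the two global address spaces; a region outside both is a caller bug. */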
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    while (mr->parent) {
        mr = mr->parent;
    }
    if (mr == address_space_memory.root) {
        return &address_space_memory;
    }
    if (mr == address_space_io.root) {
        return &address_space_io;
    }
    abort();
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        if (int128_eq(base, view->ranges[i].addr.start)) {
            now = int128_min(remain, view->ranges[i].addr.size);
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
    }
    if (int128_nz(remain)) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, int128_zero(),
                         addrrange_make(int128_zero(), int128_2_64()), false);
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

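/* Rebuild the address space's ioeventfd list from the current flat view:
 * translate each region-local ioeventfd range to absolute addresses, drop
 * those not visible in their flat range, then reconcile against the
 * previous list. */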
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

typedef void ListenerCallback(MemoryListener *listener,
                              MemoryRegionSection *mrs);

/* Want "void (&MemoryListener::*callback)(const MemoryRegionSection& s)" */
static void memory_listener_update_region(FlatRange *fr, AddressSpace *as,
                                          size_t callback_offset)
{
    MemoryRegionSection section = {
        .mr = fr->mr,
        .address_space = as->root,
        .offset_within_region = fr->offset_in_region,
        .size = int128_get64(fr->addr.size),
        .offset_within_address_space = int128_get64(fr->addr.start),
    };
    MemoryListener *listener;

    QLIST_FOREACH(listener, &memory_listeners, link) {
        ListenerCallback *callback
            = *(ListenerCallback **)((void *)listener + callback_offset);
        callback(listener, &section);
    }
}

#define MEMORY_LISTENER_UPDATE_REGION(fr, as, callback) \
    memory_listener_update_region(fr, as, offsetof(MemoryListener, callback))

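/* One pass over the symmetric difference of two flat views: with
 * adding == false only removals take effect, with adding == true only
 * additions and logging changes do.  Callers run a removal pass before an
 * addition pass so listeners never see overlapping ranges. */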
static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, region_del);
                as->ops->range_del(as, frold);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed). */

            if (adding) {
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, log_stop);
                    as->ops->log_stop(as, frnew);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    as->ops->log_start(as, frnew);
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                as->ops->range_add(as, frnew);
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

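/* Regenerate the flat views of both global address spaces.  Inside a
 * transaction the update is only recorded as pending and deferred to the
 * outermost memory_region_transaction_commit(). */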
static void memory_region_update_topology(MemoryRegion *mr)
{
    if (memory_region_transaction_depth) {
        memory_region_update_pending |= !mr || mr->enabled;
        return;
    }

    if (mr && !mr->enabled) {
        return;
    }

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }

    memory_region_update_pending = false;
}

void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth && memory_region_update_pending) {
        memory_region_update_topology(NULL);
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
    cpu_unregister_io_memory(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
    cpu_unregister_io_memory(mr->ram_addr & ~TARGET_PAGE_MASK);
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->offset = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->readable = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

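/* Validate an access against the region's constraints: the optional
 * accepts() hook, alignment (unless unaligned accesses are allowed), and
 * the declared valid access sizes. */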
static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size,
                                       bool is_write)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write)) {
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat zero as a compatibility default: all access sizes valid */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             target_phys_addr_t addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        default:
            abort();
        }
    }
}

static uint64_t memory_region_dispatch_read(MemoryRegion *mr,
                                            target_phys_addr_t addr,
                                            unsigned size)
{
    uint64_t ret;

    ret = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, &ret, size);
    return ret;
}

static void memory_region_dispatch_write(MemoryRegion *mr,
                                         target_phys_addr_t addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        return; /* FIXME: better signalling */
    }

    adjust_endianness(mr, &data, size);

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = cpu_register_io_memory(mr);
}

void memory_region_init_ram(MemoryRegion *mr,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
    mr->ram_addr |= cpu_register_io_memory(mr);
}

static uint64_t invalid_read(void *opaque, target_phys_addr_t addr,
                             unsigned size)
{
    MemoryRegion *mr = opaque;

    if (!mr->warning_printed) {
        fprintf(stderr, "Invalid read from memory region %s\n", mr->name);
        mr->warning_printed = true;
    }
    return -1U;
}

static void invalid_write(void *opaque, target_phys_addr_t addr, uint64_t data,
                          unsigned size)
{
    MemoryRegion *mr = opaque;

    if (!mr->warning_printed) {
        fprintf(stderr, "Invalid write to memory region %s\n", mr->name);
        mr->warning_printed = true;
    }
}

static const MemoryRegionOps reservation_ops = {
    .read = invalid_read,
    .write = invalid_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

void memory_region_init_reservation(MemoryRegion *mr,
                                    const char *name,
                                    uint64_t size)
{
    memory_region_init_io(mr, &reservation_ops, mr, name, size);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology(mr);
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size,
                                         1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, -1);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            MEMORY_LISTENER_UPDATE_REGION(fr, &address_space_memory, log_sync);
        }
    }
}

|
1164 |
void memory_region_set_readonly(MemoryRegion *mr, bool readonly) |
1165 |
{ |
1166 |
if (mr->readonly != readonly) {
|
1167 |
mr->readonly = readonly; |
1168 |
memory_region_update_topology(mr); |
1169 |
} |
1170 |
} |
1171 |
|
1172 |
void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable) |
1173 |
{ |
1174 |
if (mr->readable != readable) {
|
1175 |
mr->readable = readable; |
1176 |
memory_region_update_topology(mr); |
1177 |
} |
1178 |
} |
1179 |
|
1180 |
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
|
1181 |
target_phys_addr_t size, unsigned client)
|
1182 |
{ |
1183 |
assert(mr->terminates); |
1184 |
cpu_physical_memory_reset_dirty(mr->ram_addr + addr, |
1185 |
mr->ram_addr + addr + size, |
1186 |
1 << client);
|
1187 |
} |
1188 |
|
1189 |
void *memory_region_get_ram_ptr(MemoryRegion *mr)
|
1190 |
{ |
1191 |
if (mr->alias) {
|
1192 |
return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
|
1193 |
} |
1194 |
|
1195 |
assert(mr->terminates); |
1196 |
|
1197 |
return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
|
1198 |
} |
1199 |
|
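/* Re-register the region's coalesced MMIO ranges with the hypervisor:
 * for each flat range backed by @mr, translate every coalesced range to
 * absolute addresses and register the part visible in that flat range. */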
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
                                           int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(int128_get64(tmp.start),
                                             int128_get64(tmp.size));
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

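/* Insert the new ioeventfd at its sorted position (as defined by
 * memory_region_ioeventfd_before) and propagate the change. */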
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology(mr);
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology(mr);
}

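/* Attach @subregion at @offset inside @mr.  The collision check against
 * non-overlappable siblings is currently compiled out; the subregion list
 * is kept sorted by descending priority so rendering visits higher
 * priorities first. */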
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_gt(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology(mr);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology(mr);
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    mr->enabled = enabled;
    memory_region_update_topology(NULL);
}

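/* Moving a region is a remove-and-re-add under its parent, wrapped in a
 * transaction so that only a single topology update results. */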
void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr)
{
    MemoryRegion *parent = mr->parent;
    unsigned priority = mr->priority;
    bool may_overlap = mr->may_overlap;

    if (addr == mr->addr || !parent) {
        mr->addr = addr;
        return;
    }

    memory_region_transaction_begin();
    memory_region_del_subregion(parent, mr);
    if (may_overlap) {
        memory_region_add_subregion_overlap(parent, addr, mr, priority);
    } else {
        memory_region_add_subregion(parent, addr, mr);
    }
    memory_region_transaction_commit();
}

void memory_region_set_alias_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    target_phys_addr_t old_offset = mr->alias_offset;

    assert(mr->alias);
    mr->alias_offset = offset;

    if (offset == old_offset || !mr->parent) {
        return;
    }

    memory_region_update_topology(mr);
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *address_space_lookup(AddressSpace *as, AddrRange addr)
{
    return bsearch(&addr, as->current_map.ranges, as->current_map.nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

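/* bsearch() may land on any flat range intersecting the requested window;
 * back up to the first intersecting range and return its intersection with
 * the window as a MemoryRegionSection. */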
MemoryRegionSection memory_region_find(MemoryRegion *address_space,
                                       target_phys_addr_t addr, uint64_t size)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    AddrRange range = addrrange_make(int128_make64(addr),
                                     int128_make64(size));
    FlatRange *fr = address_space_lookup(as, range);
    MemoryRegionSection ret = { .mr = NULL, .size = 0 };

    if (!fr) {
        return ret;
    }

    while (fr > as->current_map.ranges
           && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = int128_get64(range.size);
    ret.offset_within_address_space = int128_get64(range.start);
    return ret;
}

void memory_global_sync_dirty_bitmap(MemoryRegion *address_space)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, log_sync);
    }
}

void memory_global_dirty_log_start(void)
{
    MemoryListener *listener;

    cpu_physical_memory_set_dirty_tracking(1);
    global_dirty_log = true;
    QLIST_FOREACH(listener, &memory_listeners, link) {
        listener->log_global_start(listener);
    }
}

void memory_global_dirty_log_stop(void)
{
    MemoryListener *listener;

    global_dirty_log = false;
    QLIST_FOREACH(listener, &memory_listeners, link) {
        listener->log_global_stop(listener);
    }
    cpu_physical_memory_set_dirty_tracking(0);
}

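/* Replay current state to a newly registered listener: global dirty logging
 * if it is active, then one region_add() per existing flat range. */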
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatRange *fr;

    if (global_dirty_log) {
        listener->log_global_start(listener);
    }
    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as->root,
            .offset_within_region = fr->offset_in_region,
            .size = int128_get64(fr->addr.size),
            .offset_within_address_space = int128_get64(fr->addr.start),
        };
        listener->region_add(listener, &section);
    }
}

void memory_listener_register(MemoryListener *listener)
{
    QLIST_INSERT_HEAD(&memory_listeners, listener, link);
    listener_add_address_space(listener, &address_space_memory);
    listener_add_address_space(listener, &address_space_io);
}

void memory_listener_unregister(MemoryListener *listener)
{
    QLIST_REMOVE(listener, link);
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology(NULL);
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology(NULL);
}

uint64_t io_mem_read(int io_index, target_phys_addr_t addr, unsigned size)
{
    return memory_region_dispatch_read(io_mem_region[io_index], addr, size);
}

void io_mem_write(int io_index, target_phys_addr_t addr,
                  uint64_t val, unsigned size)
{
    memory_region_dispatch_write(io_mem_region[io_index], addr, val, size);
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f, |
1578 |
const MemoryRegion *mr, unsigned int level, |
1579 |
target_phys_addr_t base, |
1580 |
MemoryRegionListHead *alias_print_queue) |
1581 |
{ |
1582 |
MemoryRegionList *new_ml, *ml, *next_ml; |
1583 |
MemoryRegionListHead submr_print_queue; |
1584 |
const MemoryRegion *submr;
|
1585 |
unsigned int i; |
1586 |
|
1587 |
if (!mr) {
|
1588 |
return;
|
1589 |
} |
1590 |
|
1591 |
for (i = 0; i < level; i++) { |
1592 |
mon_printf(f, " ");
|
1593 |
} |
1594 |
|
1595 |
if (mr->alias) {
|
1596 |
MemoryRegionList *ml; |
1597 |
bool found = false; |
1598 |
|
1599 |
/* check if the alias is already in the queue */
|
1600 |
QTAILQ_FOREACH(ml, alias_print_queue, queue) { |
1601 |
if (ml->mr == mr->alias && !ml->printed) {
|
1602 |
found = true;
|
1603 |
} |
1604 |
} |
1605 |
|
1606 |
if (!found) {
|
1607 |
ml = g_new(MemoryRegionList, 1);
|
1608 |
ml->mr = mr->alias; |
1609 |
ml->printed = false;
|
1610 |
QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue); |
1611 |
} |
1612 |
mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): alias %s @%s " |
1613 |
TARGET_FMT_plx "-" TARGET_FMT_plx "\n", |
1614 |
base + mr->addr, |
1615 |
base + mr->addr |
1616 |
+ (target_phys_addr_t)int128_get64(mr->size) - 1,
|
1617 |
mr->priority, |
1618 |
mr->name, |
1619 |
mr->alias->name, |
1620 |
mr->alias_offset, |
1621 |
mr->alias_offset |
1622 |
+ (target_phys_addr_t)int128_get64(mr->size) - 1);
|
1623 |
} else {
|
1624 |
mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): %s\n", |
1625 |
base + mr->addr, |
1626 |
base + mr->addr |
1627 |
+ (target_phys_addr_t)int128_get64(mr->size) - 1,
|
1628 |
mr->priority, |
1629 |
mr->name); |
1630 |
} |
1631 |
|
1632 |
QTAILQ_INIT(&submr_print_queue); |
1633 |
|
1634 |
QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { |
1635 |
new_ml = g_new(MemoryRegionList, 1);
|
1636 |
new_ml->mr = submr; |
1637 |
QTAILQ_FOREACH(ml, &submr_print_queue, queue) { |
1638 |
if (new_ml->mr->addr < ml->mr->addr ||
|
1639 |
(new_ml->mr->addr == ml->mr->addr && |
1640 |
new_ml->mr->priority > ml->mr->priority)) { |
1641 |
QTAILQ_INSERT_BEFORE(ml, new_ml, queue); |
1642 |
new_ml = NULL;
|
1643 |
break;
|
1644 |
} |
1645 |
} |
1646 |
if (new_ml) {
|
1647 |
QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue); |
1648 |
} |
1649 |
} |
1650 |
|
1651 |
QTAILQ_FOREACH(ml, &submr_print_queue, queue) { |
1652 |
mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
|
1653 |
alias_print_queue); |
1654 |
} |
1655 |
|
1656 |
QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) { |
1657 |
g_free(ml); |
1658 |
} |
1659 |
} |
1660 |
|
1661 |
void mtree_info(fprintf_function mon_printf, void *f) |
1662 |
{ |
1663 |
MemoryRegionListHead ml_head; |
1664 |
MemoryRegionList *ml, *ml2; |
1665 |
|
1666 |
QTAILQ_INIT(&ml_head); |
1667 |
|
1668 |
mon_printf(f, "memory\n");
|
1669 |
mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head); |
1670 |
|
1671 |
/* print aliased regions */
|
1672 |
QTAILQ_FOREACH(ml, &ml_head, queue) { |
1673 |
if (!ml->printed) {
|
1674 |
mon_printf(f, "%s\n", ml->mr->name);
|
1675 |
mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head); |
1676 |
} |
1677 |
} |
1678 |
|
1679 |
QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) { |
1680 |
g_free(ml); |
1681 |
} |
1682 |
|
1683 |
if (address_space_io.root &&
|
1684 |
!QTAILQ_EMPTY(&address_space_io.root->subregions)) { |
1685 |
QTAILQ_INIT(&ml_head); |
1686 |
mon_printf(f, "I/O\n");
|
1687 |
mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head); |
1688 |
} |
1689 |
} |