/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

unsigned memory_region_transaction_depth = 0;

typedef struct AddrRange AddrRange;

/*
 * Note that using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    int64_t start;
    int64_t size;
};

static AddrRange addrrange_make(int64_t start, int64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static int64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && (r1.start - r2.start) < r2.size)
        || (r2.start >= r1.start && (r2.start - r1.start) < r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    int64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    int64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};
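
/* A strict weak ordering on ioeventfds: by address, then size, match_data,
 * data, and finally fd.  Keeping ioeventfd arrays sorted by this order lets
 * old and new sets be diffed with a single merge pass (see
 * address_space_add_del_ioeventfds() below). */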
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (a.addr.start < b.addr.start) {
        return true;
    } else if (a.addr.start > b.addr.start) {
        return false;
    } else if (a.addr.size < b.addr.size) {
        return true;
    } else if (a.addr.size > b.addr.size) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};
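
/* Backend callbacks invoked as flat views are diffed; the RAM/MMIO address
 * space and the port I/O address space each supply their own set below. */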

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}
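
/* Split an access into pieces the device accepts: a @size-byte access at
 * @addr is carried out as a series of @access_size accesses, each piece
 * shifted and masked into place in *@value by the accessor above. */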
static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}

static void memory_region_prepare_ram_addr(MemoryRegion *mr);

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    if (!fr->readable) {
        phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
    }

    if (fr->readonly) {
        phys_offset |= IO_MEM_ROM;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    if (fr->dirty_log_mask) {
        cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                       fr->addr.start + fr->addr.size);
    }
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};
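
/* Legacy port I/O support: look up the old-style portio entry covering a
 * given offset, width and direction, for regions that still use the
 * old_portio callback table. */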
static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset, data);
        }
        return;
    }
    access_with_adjusted_size(offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}

static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    int64_t remain;
    int64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, INT64_MAX), false);
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  fr->addr.start - fr->offset_in_region);
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                as->ops->range_del(as, frold);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    as->ops->log_stop(as, frnew);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    as->ops->log_start(as, frnew);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                as->ops->range_add(as, frnew);
            }

            ++inew;
        }
    }
}
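
/* The update runs the symmetric-difference pass twice: first with
 * adding=false to delete vanished ranges, then with adding=true to
 * instantiate new ones, so that deletions always precede additions. */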
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

static void memory_region_update_topology(void)
{
    if (memory_region_transaction_depth) {
        return;
    }

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}
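
/*
 * Transactions batch topology updates: while the transaction depth is
 * non-zero, memory_region_update_topology() returns without doing anything,
 * and the commit that brings the depth back to zero rebuilds the flat
 * views once.  Illustrative usage (region names are hypothetical):
 *
 *     memory_region_transaction_begin();
 *     memory_region_del_subregion(sysmem, old_window);
 *     memory_region_add_subregion(sysmem, new_base, new_window);
 *     memory_region_transaction_commit();
 */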
void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    memory_region_update_topology();
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
    cpu_unregister_io_memory(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
    cpu_unregister_io_memory(mr->ram_addr & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->readable = true;
    mr->readonly = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat zero as "all access sizes valid", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}
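
/* Thunks adapting the legacy fixed-width CPUReadMemoryFunc /
 * CPUWriteMemoryFunc interface to the size-parameterized MemoryRegionOps
 * callbacks (or, failing those, to the old_mmio table). */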
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};
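
/* Lazily register a region's I/O callbacks with the core memory system;
 * invoked whenever a flat range backed by the region is mapped, but
 * registers only once. */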
static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   DeviceState *dev,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->ram_addr |= cpu_register_io_memory(memory_region_read_thunk,
                                           memory_region_write_thunk,
                                           mr,
                                           mr->ops->endianness);
    mr->ram_addr |= IO_MEM_ROMD;
    mr->backend_registered = true;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        mr->readonly = readonly;
        memory_region_update_topology();
    }
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        mr->readable = readable;
        memory_region_update_topology();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}
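
/* Re-sync a region's coalesced-MMIO windows with the kernel: drop the
 * registration covering every flat range backed by @mr, then re-register
 * the portions of its coalesced list that fall inside each range. */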
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology();
}
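
/* Subregions are kept sorted by descending priority; since
 * render_memory_region() visits them in list order and earlier ranges
 * obscure later ones, higher-priority subregions win where they overlap. */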
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->addr + other->size
            || offset + subregion->size <= other->addr) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)other->size,
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
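
/* Print @mr and, recursively, its subregions.  Aliased regions are queued
 * on @print_queue so that their targets can be printed later at the top
 * level by mtree_info(). */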
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           target_phys_addr_t base,
                           MemoryRegionListHead *print_queue)
{
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): alias %s @%s "
                   TARGET_FMT_plx "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr + (target_phys_addr_t)mr->size - 1,
                   mr->priority,
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset + (target_phys_addr_t)mr->size - 1);
    } else {
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): %s\n",
                   base + mr->addr,
                   base + mr->addr + (target_phys_addr_t)mr->size - 1,
                   mr->priority,
                   mr->name);
    }
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        mtree_print_mr(mon_printf, f, submr, level + 1, base + mr->addr,
                       print_queue);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;

    QTAILQ_INIT(&ml_head);

    mon_printf(f, "memory\n");
    mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head);

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    /* FOREACH_SAFE caches the next pointer, so the current element can be
     * freed here without corrupting the walk. */
    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }

    if (address_space_io.root &&
        !QTAILQ_EMPTY(&address_space_io.root->subregions)) {
        QTAILQ_INIT(&ml_head);
        mon_printf(f, "I/O\n");
        mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head);
    }
}