/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

unsigned memory_region_transaction_depth = 0;

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide. They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    int64_t start;
    int64_t size;
};

static AddrRange addrrange_make(int64_t start, int64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static int64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    int64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    int64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}
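
/*
 * Example: intersecting {0x1000, 0x1000} with {0x1800, 0x1000} gives
 * {0x1800, 0x800}; the MIN/MAX are taken over inclusive last addresses
 * (end - 1) rather than one-past-the-end values.
 */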
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};
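
/* Total order on ioeventfds: compare by address, then size, then match_data
 * (and by data when both match on data), and finally by fd. */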
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (a.addr.start < b.addr.start) {
        return true;
    } else if (a.addr.start > b.addr.start) {
        return false;
    } else if (a.addr.size < b.addr.size) {
        return true;
    } else if (a.addr.size > b.addr.size) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};
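
/* Callbacks that push flat-view and ioeventfd changes out to the backend
 * (the cpu_register_physical_memory* machinery for memory, the ioport
 * tables for I/O). */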
struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}
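
/* Ranges merge when they are adjacent both in guest physical address space
 * and within the backing region, and agree on dirty logging and readability. */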
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}
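
/* Split an access of @size bytes into pieces the device's callbacks can
 * handle, clamped to [access_size_min, access_size_max], invoking @access
 * once per piece with the appropriate shift and mask. */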
static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}
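
/*
 * Example: a size == 8 access against impl.max_access_size == 4 becomes two
 * 4-byte calls, with shifts 0 and 32 and mask 0xffffffff, assembled into
 * (or extracted from) the single 64-bit *value.
 */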
static void memory_region_prepare_ram_addr(MemoryRegion *mr);

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM. Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    if (!fr->readable) {
        phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    if (fr->dirty_log_mask) {
        cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                       fr->addr.start + fr->addr.size);
    }
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset, data);
        }
        return;
    }
    access_with_adjusted_size(offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}

static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    int64_t remain;
    int64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, INT64_MAX));
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
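
/* Rebuild the list of ioeventfds actually visible through the current flat
 * view (an ioeventfd only takes effect where its region is mapped), then
 * diff it against the previous list via address_space_add_del_ioeventfds(). */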
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  fr->addr.start - fr->offset_in_region);
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                as->ops->range_del(as, frold);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    as->ops->log_stop(as, frnew);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    as->ops->log_start(as, frnew);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                as->ops->range_add(as, frnew);
            }

            ++inew;
        }
    }
}
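
/* The update runs two passes over the same pair of views: the first
 * (adding == false) deletes ranges that are going away, the second
 * (adding == true) instantiates new ranges, so stale registrations are
 * removed before their replacements are installed. */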
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

static void memory_region_update_topology(void)
{
    if (memory_region_transaction_depth) {
        return;
    }

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    memory_region_update_topology();
}
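
/* Batching sketch (not compiled): the depth counter makes topology updates
 * no-ops until the outermost commit, so a series of changes is flattened
 * once. "parent" and "mr" are hypothetical regions; DIRTY_MEMORY_VGA is
 * assumed to be one of the dirty-logging clients declared in memory.h. */
#if 0
memory_region_transaction_begin();
memory_region_set_log(mr, true, DIRTY_MEMORY_VGA);
memory_region_add_subregion(parent, 0x8000, mr);
memory_region_transaction_commit();    /* single topology update here */
#endif
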
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
    cpu_unregister_io_memory(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
    cpu_unregister_io_memory(mr->ram_addr & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->readable = true;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* A zero max_access_size means any size is accepted (compatibility). */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}
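
/* Thunks bridging MemoryRegionOps to the byte/word/long callback arrays
 * expected by cpu_register_io_memory(). */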
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}
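
/* Usage sketch (not compiled): registering a small MMIO window through this
 * API. "my_dev_read"/"my_dev_write", the 0x100 size and the 0x10000000 base
 * are hypothetical. */
#if 0
static const MemoryRegionOps my_dev_ops = {
    .read = my_dev_read,
    .write = my_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemoryRegion my_dev_mmio;

static void my_dev_map(MemoryRegion *system_memory, void *opaque)
{
    memory_region_init_io(&my_dev_mmio, &my_dev_ops, opaque, "my-dev", 0x100);
    memory_region_add_subregion(system_memory, 0x10000000, &my_dev_mmio);
}
#endif
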
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   DeviceState *dev,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->ram_addr |= cpu_register_io_memory(memory_region_read_thunk,
                                           memory_region_write_thunk,
                                           mr,
                                           mr->ops->endianness);
    mr->ram_addr |= IO_MEM_ROMD;
    mr->backend_registered = true;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        mr->readable = readable;
        memory_region_update_topology();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}
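
/* Re-register with the backend the coalesced-MMIO subranges of every flat
 * range backed by @mr, translating each region-local range to absolute
 * addresses and clipping it to the visible window. */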
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}
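
/* The per-region ioeventfd array is kept sorted by
 * memory_region_ioeventfd_before() so that address_space_add_del_ioeventfds()
 * can diff the old and new sets in a single linear pass. */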
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology();
}
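
/* Attach @subregion at @offset within @mr: sanity-check for unexpected
 * overlaps (unless either side opted in via may_overlap), then insert into
 * the subregion list, which is kept sorted by descending priority. */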
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}