/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

unsigned memory_region_transaction_depth = 0;

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
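
/*
 * A quick illustration of the range helpers above (an editorial sketch,
 * not compiled in; the addresses are invented for the example):
 */
#if 0
    AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x2000));
    AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x4000));

    /* a covers [0x1000, 0x3000), b covers [0x2000, 0x6000) */
    assert(addrrange_intersects(a, b));

    /* the intersection is [0x2000, 0x3000): start 0x2000, size 0x1000 */
    AddrRange i = addrrange_intersection(a, b);
    assert(int128_eq(i.start, int128_make64(0x2000)));
    assert(int128_eq(i.size, int128_make64(0x1000)));
#endif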

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
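
/*
 * For example (values invented for illustration): two flat ranges over the
 * same MemoryRegion covering [0x0, 0x1000) at region offset 0x0 and
 * [0x1000, 0x2000) at region offset 0x1000 satisfy can_merge(), provided
 * their dirty-log, readable and readonly attributes match, so
 * flatview_simplify() collapses them into one [0x0, 0x2000) range.
 */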

static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}

static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}
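
/*
 * Worked example (an editorial note, not part of the original file): a
 * 64-bit access at addr 0x10 against an implementation that handles at
 * most 32-bit accesses (access_size_max == 4) is split into two 32-bit
 * accesses,
 *
 *     access(opaque, 0x10, value, 4, 0,  0xffffffffULL);
 *     access(opaque, 0x14, value, 4, 32, 0xffffffffULL);
 *
 * so the low word is transferred at shift 0 and the high word at shift 32,
 * each masked to 32 bits.
 */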

static void memory_region_prepare_ram_addr(MemoryRegion *mr);

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    if (!fr->readable) {
        phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
    }

    if (fr->readonly) {
        phys_offset |= IO_MEM_ROM;
    }

    cpu_register_physical_memory_log(int128_get64(fr->addr.start),
                                     int128_get64(fr->addr.size),
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    if (fr->dirty_log_mask) {
        Int128 end = addrrange_end(fr->addr);
        cpu_physical_sync_dirty_bitmap(int128_get64(fr->addr.start),
                                       int128_get64(end));
    }
    cpu_register_physical_memory(int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size),
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(int128_get64(fr->addr.start),
                           int128_get64(fr->addr.size));
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(int128_get64(fr->addr.start),
                          int128_get64(fr->addr.size));
}

static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && int128_get64(fd->addr.size) == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
                                    fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
                                    fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset + mr->offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset + mr->offset) |
                    (mrp->read(mr->opaque, offset + mr->offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset + mr->offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset + mr->offset, data);
        } else if (width == 2) {
            /* split into two byte writes; look up the byte-wide *write*
             * handler (the original passed false, i.e. the read handler,
             * and then called ->write on it) */
            mrp = find_portio(mr, offset, 1, true);
            assert(mrp);
            mrp->write(mr->opaque, offset + mr->offset, data & 0xff);
            mrp->write(mr->opaque, offset + mr->offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset + mr->offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 int128_get64(fr->addr.start), int128_get64(fr->addr.size));
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(int128_get64(fr->addr.start),
                        int128_get64(fr->addr.size));
}

static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && int128_get64(fd->addr.size) == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
                                   fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
                                   fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        if (int128_eq(base, view->ranges[i].addr.start)) {
            now = int128_min(remain, view->ranges[i].addr.size);
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
    }
    if (int128_nz(remain)) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}
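
/*
 * Illustration (names and sizes invented): given a container holding a RAM
 * subregion at 0x0 and a higher-priority MMIO subregion of length mmio_len
 * at 0x1000, the MMIO region is rendered first, and the RAM region then
 * only fills the gaps around it, yielding three flat ranges:
 *
 *     [0x0, 0x1000)               -> ram,  offset_in_region 0x0
 *     [0x1000, 0x1000 + mmio_len) -> mmio, offset_in_region 0x0
 *     [0x1000 + mmio_len, ...)    -> ram,  offset_in_region 0x1000 + mmio_len
 */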

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, int128_zero(),
                         addrrange_make(int128_zero(), int128_2_64()), false);
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                as->ops->range_del(as, frold);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    as->ops->log_stop(as, frnew);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    as->ops->log_start(as, frnew);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                as->ops->range_add(as, frnew);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

static void memory_region_update_topology(void)
{
    if (memory_region_transaction_depth) {
        return;
    }

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    memory_region_update_topology();
}
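
/*
 * Typical use of the transaction API (a sketch; sysmem, bar0, bar1 and the
 * BAR addresses are hypothetical): batch several topology changes so the
 * flat view is rebuilt once, at commit time, instead of after every change.
 */
#if 0
    memory_region_transaction_begin();
    memory_region_del_subregion(sysmem, bar0);
    memory_region_add_subregion(sysmem, 0xfe000000, bar0);
    memory_region_add_subregion(sysmem, 0xfe100000, bar1);
    memory_region_transaction_commit();
#endif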

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
    cpu_unregister_io_memory(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
    cpu_unregister_io_memory(mr->ram_addr & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->readable = true;
    mr->readonly = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size,
                                       bool is_write)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write)) {
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a zero max_access_size as "all access sizes valid", for
     * compatibility with regions that do not fill in .valid. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;

    if (!memory_region_access_valid(mr, addr, size, true)) {
        return; /* FIXME: better signalling */
    }

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}
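
/*
 * A minimal sketch of how a device might use memory_region_init_io()
 * (MyDevState, mydev_read and mydev_write are hypothetical, not part of
 * this file):
 */
#if 0
typedef struct MyDevState {
    MemoryRegion mmio;
    uint32_t regs[64];
} MyDevState;

static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
                           unsigned size)
{
    MyDevState *s = opaque;
    return s->regs[addr >> 2];
}

static void mydev_write(void *opaque, target_phys_addr_t addr,
                        uint64_t data, unsigned size)
{
    MyDevState *s = opaque;
    s->regs[addr >> 2] = data;
}

static const MemoryRegionOps mydev_ops = {
    .read = mydev_read,
    .write = mydev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
};

    /* ... then, in the device's init function: */
    memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
#endif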

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
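
/*
 * Example use of an alias (a sketch; the 640 KB "lowmem" split is the
 * classic PC use case, and the variable names are invented): expose the
 * first 0xa0000 bytes of ram as a separate region without duplicating the
 * backing storage.
 */
#if 0
    memory_region_init_alias(&lowmem, "lowmem", &ram, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &lowmem);
#endif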

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   DeviceState *dev,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->ram_addr |= cpu_register_io_memory(memory_region_read_thunk,
                                           memory_region_write_thunk,
                                           mr,
                                           mr->ops->endianness);
    mr->ram_addr |= IO_MEM_ROMD;
    mr->backend_registered = true;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(int128_get64(fr->addr.start),
                                           int128_get64(addrrange_end(fr->addr)));
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        mr->readonly = readonly;
        memory_region_update_topology();
    }
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        mr->readable = readable;
        memory_region_update_topology();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
                                           int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(int128_get64(tmp.start),
                                             int128_get64(tmp.size));
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology();
}
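
/*
 * Sketch of ioeventfd registration for a doorbell register (the offset
 * 0x40 and notifier value 0x12345678 are invented; eventfd() is the Linux
 * eventfd(2) call).  Note that as_memory_ioeventfd_add() above requires a
 * 4-byte, data-matching binding for memory-mapped doorbells under KVM.
 */
#if 0
    int fd = eventfd(0, 0);
    memory_region_add_eventfd(&s->mmio, 0x40, 4, true, 0x12345678, fd);
#endif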

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    /* the "+ 1" keeps the allocation non-empty when the last eventfd is
     * removed (g_realloc(ptr, 0) would free it and return NULL) */
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_gt(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
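
/*
 * Example of overlapping registration (a sketch; sysmem, ram, bios_window
 * and the addresses are invented).  Where the two regions overlap the
 * higher priority wins; the lower-priority region shows through everywhere
 * else in the flattened view.
 */
#if 0
    memory_region_add_subregion_overlap(sysmem, 0x0, &ram, 0);
    memory_region_add_subregion_overlap(sysmem, 0xe0000, &bios_window, 1);
#endif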

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           target_phys_addr_t base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): alias %s @%s "
                   TARGET_FMT_plx "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset
                   + (target_phys_addr_t)int128_get64(mr->size) - 1);
    } else {
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): %s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->name);
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;

    QTAILQ_INIT(&ml_head);

    mon_printf(f, "memory\n");
    mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head);

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }

    if (address_space_io.root &&
        !QTAILQ_EMPTY(&address_space_io.root->subregions)) {
        QTAILQ_INIT(&ml_head);
        mon_printf(f, "I/O\n");
        mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head);
    }
}