/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

unsigned memory_region_transaction_depth = 0;
static bool memory_region_update_pending = false;
static bool global_dirty_log = false;

static QLIST_HEAD(, MemoryListener) memory_listeners
    = QLIST_HEAD_INITIALIZER(memory_listeners);

typedef struct AddrRange AddrRange;

/*
 * Note that using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};

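/* Ordering predicate for ioeventfds: compares (start, size, match_data,
 * data, fd) lexicographically.  Keeping ioeventfd arrays sorted by this
 * total order is what lets address_space_add_del_ioeventfds() below diff
 * an old and a new set in a single linear pass.
 */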
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

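/* Two ranges merge when they touch in the address space, are contiguous
 * within the same region, and agree on all attributes.  For example, two
 * back-to-back pages of one RAM region rendered separately collapse into a
 * single FlatRange, keeping the flat view (and the dispatch tables built
 * from it) small.
 */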
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}

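/* Split one guest access into pieces the device model can handle.  E.g.
 * with access_size_min/max of 2/2, a 4-byte access at offset 0 becomes two
 * 2-byte accessor calls at offsets 0 and 2, with shifts 0 and 16 and a
 * 16-bit mask, reassembled through *value.
 */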
static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    MemoryRegionSection section = {
        .mr = fr->mr,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .offset_within_region = fr->offset_in_region,
        .size = int128_get64(fr->addr.size),
    };

    cpu_register_physical_memory_log(&section, fr->readable, fr->readonly);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    MemoryRegionSection section = {
        .mr = &io_mem_unassigned,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .offset_within_region = int128_get64(fr->addr.start),
        .size = int128_get64(fr->addr.size),
    };

    cpu_register_physical_memory_log(&section, true, false);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
}

static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && int128_get64(fd->addr.size) == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
                                    fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
                                    fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset + mr->offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset + mr->offset) |
                    (mrp->read(mr->opaque, offset + mr->offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset + mr->offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset + mr->offset, data);
        } else if (width == 2) {
            /* Splitting a 16-bit write needs a byte *write* handler. */
            mrp = find_portio(mr, offset, 1, true);
            assert(mrp);
            mrp->write(mr->opaque, offset + mr->offset, data & 0xff);
            mrp->write(mr->opaque, offset + mr->offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset + mr->offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 int128_get64(fr->addr.start), int128_get64(fr->addr.size));
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(int128_get64(fr->addr.start),
                        int128_get64(fr->addr.size));
}

static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && int128_get64(fd->addr.size) == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
                                   fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
                                   fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    while (mr->parent) {
        mr = mr->parent;
    }
    if (mr == address_space_memory.root) {
        return &address_space_memory;
    }
    if (mr == address_space_io.root) {
        return &address_space_io;
    }
    abort();
}

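/* The recursion below walks the region tree depth-first: aliases redirect
 * to their target with an adjusted base, subregions are visited in
 * priority order, and the region itself then only fills the gaps left
 * unclaimed by earlier (higher-priority) ranges, clipped against @clip.
 */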
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        if (int128_eq(base, view->ranges[i].addr.start)) {
            now = int128_min(remain, view->ranges[i].addr.size);
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
    }
    if (int128_nz(remain)) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, int128_zero(),
                         addrrange_make(int128_zero(), int128_2_64()), false);
    flatview_simplify(&view);

    return view;
}

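/* Both fd arrays below are sorted by memory_region_ioeventfd_before(), so
 * the usual sorted-merge walk visits each element once: entries only in
 * the old set are deleted, entries only in the new set are added, and
 * matches are left alone.
 */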
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

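/* Poor man's member-function pointer: each MemoryListener callback slot is
 * located by its byte offset within the struct, supplied through the
 * MEMORY_LISTENER_UPDATE_REGION macro below via offsetof(), and invoked
 * through the common ListenerCallback signature.
 */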
typedef void ListenerCallback(MemoryListener *listener,
                              MemoryRegionSection *mrs);

/* Want "void (&MemoryListener::*callback)(const MemoryRegionSection& s)" */
static void memory_listener_update_region(FlatRange *fr, AddressSpace *as,
                                          size_t callback_offset)
{
    MemoryRegionSection section = {
        .mr = fr->mr,
        .address_space = as->root,
        .offset_within_region = fr->offset_in_region,
        .size = int128_get64(fr->addr.size),
        .offset_within_address_space = int128_get64(fr->addr.start),
    };
    MemoryListener *listener;

    QLIST_FOREACH(listener, &memory_listeners, link) {
        ListenerCallback *callback
            = *(ListenerCallback **)((void *)listener + callback_offset);
        callback(listener, &section);
    }
}

#define MEMORY_LISTENER_UPDATE_REGION(fr, as, callback) \
    memory_listener_update_region(fr, as, offsetof(MemoryListener, callback))

static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, region_del);
                as->ops->range_del(as, frold);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, log_stop);
                    as->ops->log_stop(as, frnew);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    as->ops->log_start(as, frnew);
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                as->ops->range_add(as, frnew);
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, region_add);
            }

            ++inew;
        }
    }
}

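/* Update in two passes: the first pass (adding = false) tears down ranges
 * that disappeared or changed attributes, the second (adding = true)
 * instantiates the new ones, so listeners always see deletions before the
 * additions that may reuse the same addresses.
 */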
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

static void memory_region_update_topology(MemoryRegion *mr)
{
    if (memory_region_transaction_depth) {
        memory_region_update_pending |= !mr || mr->enabled;
        return;
    }

    if (mr && !mr->enabled) {
        return;
    }

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }

    memory_region_update_pending = false;
}

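/* Transactions batch topology updates.  A typical (illustrative) sequence,
 * with "bar" standing in for any subregion:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar, false);
 *     memory_region_set_address(bar, new_addr);
 *     memory_region_set_enabled(bar, true);
 *     memory_region_transaction_commit();
 *
 * defers the flat-view rebuild to the final commit instead of rebuilding
 * after every call.  Transactions nest; only the outermost commit updates.
 */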
void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth && memory_region_update_pending) {
        memory_region_update_topology(NULL);
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
    cpu_unregister_io_memory(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
    cpu_unregister_io_memory(mr->ram_addr & ~TARGET_PAGE_MASK);
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

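/* Common initialization shared by all region types; the type-specific
 * memory_region_init_*() wrappers below refine these defaults.  A size of
 * UINT64_MAX is taken to mean a full 2^64-byte region, which is why sizes
 * are stored as Int128.
 */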
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->offset = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->readable = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size,
                                       bool is_write)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write)) {
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat zero as "any access size valid", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             target_phys_addr_t addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        default:
            abort();
        }
    }
}

static uint64_t memory_region_dispatch_read(MemoryRegion *mr,
                                            target_phys_addr_t addr,
                                            unsigned size)
{
    uint64_t ret;

    ret = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, &ret, size);
    return ret;
}

static void memory_region_dispatch_write(MemoryRegion *mr,
                                         target_phys_addr_t addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        return; /* FIXME: better signalling */
    }

    adjust_endianness(mr, &data, size);

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

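/* Illustrative use of the MMIO initializer (the device, its state struct
 * and its ops names below are hypothetical):
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(system_memory, base, &s->mmio);
 */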
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = cpu_register_io_memory(mr);
}

void memory_region_init_ram(MemoryRegion *mr,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
    mr->ram_addr |= cpu_register_io_memory(mr);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology(mr);
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            MEMORY_LISTENER_UPDATE_REGION(fr, &address_space_memory, log_sync);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        mr->readonly = readonly;
        memory_region_update_topology(mr);
    }
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        mr->readable = readable;
        memory_region_update_topology(mr);
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
                                           int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(int128_get64(tmp.start),
                                             int128_get64(tmp.size));
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

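/* The per-region ioeventfd array is kept sorted by
 * memory_region_ioeventfd_before(): the insertion below finds the slot,
 * grows the array, and shifts the tail up, so that a later
 * address_space_update_ioeventfds() can diff old and new sets linearly.
 */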
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology(mr);
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology(mr);
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_gt(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology(mr);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology(mr);
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    mr->enabled = enabled;
    memory_region_update_topology(NULL);
}

void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr)
{
    MemoryRegion *parent = mr->parent;
    unsigned priority = mr->priority;
    bool may_overlap = mr->may_overlap;

    if (addr == mr->addr || !parent) {
        mr->addr = addr;
        return;
    }

    memory_region_transaction_begin();
    memory_region_del_subregion(parent, mr);
    if (may_overlap) {
        memory_region_add_subregion_overlap(parent, addr, mr, priority);
    } else {
        memory_region_add_subregion(parent, addr, mr);
    }
    memory_region_transaction_commit();
}

void memory_region_set_alias_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    target_phys_addr_t old_offset = mr->alias_offset;

    assert(mr->alias);
    mr->alias_offset = offset;

    if (offset == old_offset || !mr->parent) {
        return;
    }

    memory_region_update_topology(mr);
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *address_space_lookup(AddressSpace *as, AddrRange addr)
{
    return bsearch(&addr, as->current_map.ranges, as->current_map.nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

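/* bsearch() above may land on any range intersecting the query, so
 * memory_region_find() first walks backwards to the lowest intersecting
 * FlatRange, then reports the intersection with that range in both
 * region-relative and absolute coordinates.
 */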
MemoryRegionSection memory_region_find(MemoryRegion *address_space,
                                       target_phys_addr_t addr, uint64_t size)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    AddrRange range = addrrange_make(int128_make64(addr),
                                     int128_make64(size));
    FlatRange *fr = address_space_lookup(as, range);
    MemoryRegionSection ret = { .mr = NULL, .size = 0 };

    if (!fr) {
        return ret;
    }

    while (fr > as->current_map.ranges
           && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = int128_get64(range.size);
    ret.offset_within_address_space = int128_get64(range.start);
    return ret;
}

void memory_global_sync_dirty_bitmap(MemoryRegion *address_space)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, log_sync);
    }
}

void memory_global_dirty_log_start(void)
{
    MemoryListener *listener;

    cpu_physical_memory_set_dirty_tracking(1);
    global_dirty_log = true;
    QLIST_FOREACH(listener, &memory_listeners, link) {
        listener->log_global_start(listener);
    }
}

void memory_global_dirty_log_stop(void)
{
    MemoryListener *listener;

    global_dirty_log = false;
    QLIST_FOREACH(listener, &memory_listeners, link) {
        listener->log_global_stop(listener);
    }
    cpu_physical_memory_set_dirty_tracking(0);
}

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatRange *fr;

    if (global_dirty_log) {
        listener->log_global_start(listener);
    }
    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as->root,
            .offset_within_region = fr->offset_in_region,
            .size = int128_get64(fr->addr.size),
            .offset_within_address_space = int128_get64(fr->addr.start),
        };
        listener->region_add(listener, &section);
    }
}

void memory_listener_register(MemoryListener *listener)
{
    QLIST_INSERT_HEAD(&memory_listeners, listener, link);
    listener_add_address_space(listener, &address_space_memory);
    listener_add_address_space(listener, &address_space_io);
}

void memory_listener_unregister(MemoryListener *listener)
{
    QLIST_REMOVE(listener, link);
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology(NULL);
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology(NULL);
}

uint64_t io_mem_read(int io_index, target_phys_addr_t addr, unsigned size)
{
    return memory_region_dispatch_read(io_mem_region[io_index], addr, size);
}

void io_mem_write(int io_index, target_phys_addr_t addr,
                  uint64_t val, unsigned size)
{
    memory_region_dispatch_write(io_mem_region[io_index], addr, val, size);
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

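/* Monitor "info mtree" helper: prints @mr indented by @level, with
 * subregions ordered by ascending address and, on ties, descending
 * priority; aliases are queued on @alias_print_queue so that their targets
 * are printed once at the end.
 */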
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           target_phys_addr_t base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): alias %s @%s "
                   TARGET_FMT_plx "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset
                   + (target_phys_addr_t)int128_get64(mr->size) - 1);
    } else {
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): %s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->name);
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;

    QTAILQ_INIT(&ml_head);

    mon_printf(f, "memory\n");
    mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head);

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }

    if (address_space_io.root &&
        !QTAILQ_EMPTY(&address_space_io.root->subregions)) {
        QTAILQ_INIT(&ml_head);
        mon_printf(f, "I/O\n");
        mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head);
    }
}