/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

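/* A physical address range, starting at @start and covering @size bytes. */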
struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

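/* A coalesced MMIO range; @addr is relative to the start of the owning
 * memory region.
 */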
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
};

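/* Callbacks invoked when flat ranges are added to or removed from an address
 * space, or when dirty logging is enabled or disabled on a range.
 */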
struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
};

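/* Iterate over all FlatRanges of a FlatView, in address order. */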
#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_prepare_ram_addr(MemoryRegion *mr);

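/* The as_memory_*() callbacks bridge the memory address space to the core
 * cpu_register_physical_memory*() and cpu_physical_log_*() interfaces.
 */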
static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

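/* The I/O address space dispatches accesses through the ioport layer; these
 * adapters expose a MemoryRegion's read/write ops as an IORange.
 */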
static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    *data = mr->ops->read(mr->opaque, offset, width);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    mr->ops->write(mr->opaque, offset, data, width);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

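/* Replace an address space's flat view with a freshly generated one, telling
 * the address space ops about every range that was added, removed, or had
 * its dirty logging state changed.
 */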
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }
    as->current_map = new_view;
    flatview_destroy(&old_view);
}

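/* Regenerate the flat views of all address spaces that have a root region. */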
static void memory_region_update_topology(void)
{
    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a max_access_size of zero as "any size is valid", for
     * compatibility.
     */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

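/* Perform an access by splitting it into pieces that satisfy the region's
 * impl.min_access_size/impl.max_access_size constraints, then assembling
 * the partial results.
 */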
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

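/* Thunk tables passed to cpu_register_io_memory(); indices 0, 1, 2 handle
 * byte, word, and long accesses respectively.
 */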
static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

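/* Lazily register the region's I/O thunks with cpu_register_io_memory();
 * does nothing once backend_registered is set.
 */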
static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    return cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr);
}

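/* Drop and re-register the coalesced MMIO ranges for every flat range backed
 * by @mr, clipping each coalesced range to the part of the region that is
 * actually mapped.
 */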
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

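/* Attach @subregion to @mr at @offset: warn about collisions between
 * non-overlappable siblings, insert in descending priority order, and update
 * the memory topology.
 */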
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}