/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <glib.h>

#include "cpu.h"
#include "exec/cpu-all.h"
#include "sysemu/memory_mapping.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

//#define DEBUG_GUEST_PHYS_REGION_ADD

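/*
 * Insert @mapping in front of the first entry whose phys_addr is not lower,
 * keeping the list sorted by ascending phys_addr.
 */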
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
                                                   MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}

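/*
 * Allocate a new MemoryMapping, record it as the list's last_mapping, and
 * insert it in phys_addr order.
 */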
static void create_new_memory_mapping(MemoryMappingList *list,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr,
                                      ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_malloc(sizeof(MemoryMapping));
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}

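/*
 * Does the region starting at (phys_addr, virt_addr) continue @map exactly
 * at its end, in both the physical and the virtual address space?
 */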
static inline bool mapping_contiguous(MemoryMapping *map,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}

/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map,
                                            hwaddr phys_addr,
                                            ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}

/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect. Do the virtual addresses in
 * the intersection disagree, i.e. do the two mappings conflict?
 */
static inline bool mapping_conflict(MemoryMapping *map,
                                    hwaddr phys_addr,
                                    hwaddr virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}

/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses in
 * the intersection are the same: grow @map to cover the union of the two
 * ranges.
 */
static inline void mapping_merge(MemoryMapping *map,
                                 hwaddr virt_addr,
                                 ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) >
        (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}

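/*
 * Add the region [phys_addr, phys_addr + length) mapped at virt_addr to
 * @list: extend the most recently touched mapping if the new region simply
 * continues it, merge it into a compatible overlapping mapping, or insert
 * it as a new entry in phys_addr order.
 */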
void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region cannot be merged into any existing memory mapping */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}

void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }

    list->num = 0;
    list->last_mapping = NULL;
}

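/*
 * Prepare an empty mapping list. A minimal usage sketch (illustrative only;
 * the addresses are made up):
 *
 *     MemoryMappingList list;
 *
 *     memory_mapping_list_init(&list);
 *     memory_mapping_list_add_merge_sorted(&list, 0x1000, 0x40001000, 0x1000);
 *     memory_mapping_list_add_merge_sorted(&list, 0x2000, 0x40002000, 0x1000);
 *     // contiguous in both spaces: the list now holds one 0x2000-byte mapping
 *     memory_mapping_list_free(&list);
 */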
void memory_mapping_list_init(MemoryMappingList *list)
{
    list->num = 0;
    list->last_mapping = NULL;
    QTAILQ_INIT(&list->head);
}

void guest_phys_blocks_free(GuestPhysBlockList *list)
{
    GuestPhysBlock *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }
    list->num = 0;
}

void guest_phys_blocks_init(GuestPhysBlockList *list)
{
    list->num = 0;
    QTAILQ_INIT(&list->head);
}

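/*
 * Pairs a MemoryListener with the block list it fills in; the region_add
 * callback recovers it with container_of().
 */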
typedef struct GuestPhysListener {
    GuestPhysBlockList *list;
    MemoryListener listener;
} GuestPhysListener;

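/*
 * MemoryListener callback: record one RAM section, merging it into the last
 * block when it is contiguous with that block in both guest physical and
 * host virtual memory.
 */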
static void guest_phys_blocks_region_add(MemoryListener *listener,
                                         MemoryRegionSection *section)
{
    GuestPhysListener *g;
    uint64_t section_size;
    hwaddr target_start, target_end;
    uint8_t *host_addr;
    GuestPhysBlock *predecessor;

    /* we only care about RAM */
    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    g            = container_of(listener, GuestPhysListener, listener);
    section_size = int128_get64(section->size);
    target_start = section->offset_within_address_space;
    target_end   = target_start + section_size;
    host_addr    = memory_region_get_ram_ptr(section->mr) +
                   section->offset_within_region;
    predecessor  = NULL;

    /* find continuity in guest physical address space */
    if (!QTAILQ_EMPTY(&g->list->head)) {
        hwaddr predecessor_size;

        predecessor = QTAILQ_LAST(&g->list->head, GuestPhysBlockHead);
        predecessor_size = predecessor->target_end - predecessor->target_start;

        /* the memory API guarantees monotonically increasing traversal */
        g_assert(predecessor->target_end <= target_start);

        /* we want continuity in both guest-physical and host-virtual memory */
        if (predecessor->target_end < target_start ||
            predecessor->host_addr + predecessor_size != host_addr) {
            predecessor = NULL;
        }
    }

    if (predecessor == NULL) {
        /* isolated mapping, allocate it and add it to the list */
        GuestPhysBlock *block = g_malloc0(sizeof *block);

        block->target_start = target_start;
        block->target_end   = target_end;
        block->host_addr    = host_addr;

        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
        ++g->list->num;
    } else {
        /* expand predecessor until @target_end; predecessor's start doesn't
         * change
         */
        predecessor->target_end = target_end;
    }

#ifdef DEBUG_GUEST_PHYS_REGION_ADD
    fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end="
            TARGET_FMT_plx ": %s (count: %u)\n", __FUNCTION__, target_start,
            target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}

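/*
 * Fill @list with the guest's current RAM layout. Registering the listener
 * replays every existing region through guest_phys_blocks_region_add, so the
 * listener can be unregistered again immediately afterwards.
 */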
void guest_phys_blocks_append(GuestPhysBlockList *list)
{
    GuestPhysListener g = { 0 };

    g.list = list;
    g.listener.region_add = &guest_phys_blocks_region_add;
    memory_listener_register(&g.listener, &address_space_memory);
    memory_listener_unregister(&g.listener);
}

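/*
 * Return the first CPU with paging enabled, or NULL if there is none. Note
 * that @start_cpu is currently unused: all CPUs are scanned.
 */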
static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu_paging_enabled(cpu)) {
            return cpu;
        }
    }

    return NULL;
}

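/*
 * Build the guest's memory mappings: walk the page tables of each CPU from
 * the first paging-enabled one onwards, or fall back to identity-mapping
 * the physical blocks when no CPU has paging enabled.
 */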
void qemu_get_guest_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks,
                                   Error **errp)
{
    CPUState *cpu, *first_paging_enabled_cpu;
    GuestPhysBlock *block;
    ram_addr_t offset, length;

    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
    if (first_paging_enabled_cpu) {
        for (cpu = first_paging_enabled_cpu; cpu != NULL;
             cpu = CPU_NEXT(cpu)) {
            Error *err = NULL;
            cpu_get_memory_mapping(cpu, list, &err);
            if (err) {
                error_propagate(errp, err);
                return;
            }
        }
        return;
    }

    /*
     * If the guest doesn't use paging, the virtual address is equal to
     * the physical address.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        offset = block->target_start;
        length = block->target_end - block->target_start;
        create_new_memory_mapping(list, offset, offset, length);
    }
}

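/*
 * Build a flat mapping of the physical blocks only; virt_addr is left at 0
 * in every entry because no virtual addresses are known.
 */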
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks)
{
    GuestPhysBlock *block;

    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        create_new_memory_mapping(list, block->target_start, 0,
                                  block->target_end - block->target_start);
    }
}

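/*
 * Restrict @list to the window [begin, begin + length): drop mappings that
 * lie entirely outside the window and clip those that straddle its edges.
 */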
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        if (cur->phys_addr >= begin + length ||
            cur->phys_addr + cur->length <= begin) {
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}