root / xen-mapcache.c @ a74cdab4
/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "config.h"

#include <sys/resource.h>

#include "hw/xen_backend.h"
#include "blockdev.h"
#include "bitmap.h"

#include <xen/hvm/params.h>
#include <sys/mman.h>

#include "xen-mapcache.h"
#include "trace.h"


//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

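/*
 * Bucket granularity and overall cap for the cache: each bucket maps a
 * 64 KiB chunk of guest memory on 32-bit hosts and 1 MiB on 64-bit hosts,
 * and the virtual address space used by the cache is capped at 2 GiB and
 * 32 GiB respectively.
 */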
#if defined(__i386__)
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#elif defined(__x86_64__)
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

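/*
 * The cache is an array of buckets indexed by (paddr_index % nr_buckets),
 * each holding a singly linked chain of MapCacheEntry mappings.  An entry
 * records which guest bucket it maps, the host virtual address of the
 * mapping and a per-page validity bitmap.  Locked mappings are also kept
 * in a reverse list of MapCacheRev records so they can be found again by
 * host virtual address when unlocking or invalidating.
 */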
typedef struct MapCacheEntry {
    target_phys_addr_t paddr_index;
    uint8_t *vaddr_base;
    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
    uint8_t lock;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    target_phys_addr_t paddr_index;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    target_phys_addr_t last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;
} MapCache;

static MapCache *mapcache;

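/*
 * Allocate the mapcache and size it from the process address-space limit:
 * the RLIMIT_AS soft limit is raised to the hard limit (capped at
 * MCACHE_MAX_SIZE), and that value determines how many buckets the entry
 * array gets.
 */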
void qemu_map_cache_init(void)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = qemu_mallocz(sizeof (MapCache));

    QTAILQ_INIT(&mapcache->locked_entries);
    mapcache->last_address_index = -1;

    getrlimit(RLIMIT_AS, &rlimit_as);
    if (rlimit_as.rlim_max < MCACHE_MAX_SIZE) {
        rlimit_as.rlim_cur = rlimit_as.rlim_max;
    } else {
        rlimit_as.rlim_cur = MCACHE_MAX_SIZE;
    }

    setrlimit(RLIMIT_AS, &rlimit_as);
    mapcache->max_mcache_size = rlimit_as.rlim_cur;

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache->nr_buckets, size);
    mapcache->entry = qemu_mallocz(size);
}

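/*
 * (Re)map one bucket: tear down any previous mapping, map the guest frames
 * for address_index with xc_map_foreign_bulk(), and record which pages were
 * mapped successfully in the entry's validity bitmap.
 */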
static void qemu_remap_bucket(MapCacheEntry *entry,
                              target_phys_addr_t size,
                              target_phys_addr_t address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_qemu_remap_bucket(address_index);

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    qemu_free(pfns);
    qemu_free(err);
}

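/*
 * Translate a guest physical address into a host pointer.  The fast path
 * reuses the last translation; otherwise the bucket chain is searched for a
 * matching (or reusable, unlocked) entry, remapping it on demand.  When
 * 'lock' is set, the entry's lock count is bumped and a MapCacheRev record
 * is kept so the mapping persists until it is explicitly unlocked or
 * invalidated.
 */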
uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    target_phys_addr_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_qemu_map_cache(phys_addr);

    if (address_index == mapcache->last_address_index && !lock) {
        trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
        return mapcache->last_address_vaddr + address_offset;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->paddr_index != address_index && entry->vaddr_base) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = qemu_mallocz(sizeof (MapCacheEntry));
        pentry->next = entry;
        qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
            !test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
            qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
        }
    }

    if (!test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
        mapcache->last_address_index = -1;
        trace_qemu_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_address_index = address_index;
    mapcache->last_address_vaddr = entry->vaddr_base;
    if (lock) {
        MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
        reventry->paddr_index = mapcache->last_address_index;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
    return mapcache->last_address_vaddr + address_offset;
}

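/*
 * Drop the lock taken by a previous qemu_map_cache(..., lock != 0) call,
 * identified by the host pointer that call returned.
 */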
void qemu_map_cache_unlock(void *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    qemu_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && entry->paddr_index != paddr_index) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        return;
    }
    if (entry->lock > 0) {
        entry->lock--;
    }
}

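/*
 * Reverse-translate a locked host pointer back to the guest physical
 * address of its bucket.  Aborts if the pointer is not a locked mapping.
 */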
ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
{
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    return paddr_index << MCACHE_BUCKET_SHIFT;
}

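/*
 * Unlock a single locked mapping and, once its lock count reaches zero,
 * unmap the bucket and free the chained entry.  The first entry of a
 * bucket is never freed, since it lives in the bucket array itself.
 */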
void qemu_invalidate_entry(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    int found = 0;

    if (mapcache->last_address_vaddr == buffer) {
        mapcache->last_address_index = -1;
    }

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    qemu_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && entry->paddr_index != paddr_index) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    qemu_free(entry);
}

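/*
 * Unmap every currently mapped bucket.  Pending AIO is flushed first, and
 * any mapping still locked at this point is reported via DPRINTF.
 */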
void qemu_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    qemu_aio_flush();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    mapcache_lock();

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }

        if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
    }

    mapcache->last_address_index = -1;
    mapcache->last_address_vaddr = NULL;

    mapcache_unlock();
}

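/*
 * Map an arbitrary guest physical range directly with
 * xc_map_foreign_bulk(), bypassing the cache; the returned mapping is not
 * tracked by the mapcache.
 */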
uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_map_block(phys_addr, size);
    phys_addr >>= XC_PAGE_SHIFT;

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = phys_addr + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    qemu_free(pfns);
    qemu_free(err);

    return vaddr_base;
}