xen-mapcache.c @ a8170e5e
/*
 * Copyright (C) 2011       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "config.h"
12

    
13
#include <sys/resource.h>
14

    
15
#include "hw/xen_backend.h"
16
#include "blockdev.h"
17
#include "bitmap.h"
18

    
19
#include <xen/hvm/params.h>
20
#include <sys/mman.h>
21

    
22
#include "xen-mapcache.h"
23
#include "trace.h"
24

    
25

    
26
//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

#if defined(__i386__)
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#elif defined(__x86_64__)
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

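/* Each bucket therefore covers 64KB of guest-physical address space on
 * 32-bit hosts and 1MB on 64-bit hosts. */
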
/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the MapCache.
 * From empirical tests I observed that QEMU uses 75MB more than
 * max_mcache_size.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)

#define mapcache_lock()   ((void)0)
#define mapcache_unlock() ((void)0)

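/* One mapping of a bucket of guest memory. Buckets that hash to the same
 * slot are chained through 'next'; 'valid_mapping' has one bit per
 * XC_PAGE_SIZE page, set if that page was successfully mapped. */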
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

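/* Reverse-lookup record for a locked mapping, so that a virtual address
 * handed out by xen_map_cache() can be translated back to its guest
 * address. */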
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;

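/* The cache itself: a hash table of bucket chains, a one-entry cache of the
 * last lookup, and an optional callback for retranslating guest addresses. */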
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    hwaddr last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    void *opaque;
} MapCache;

static MapCache *mapcache;

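/* Return 1 if all 'size' bits starting at bit 'nr' are set in the bitmap,
 * 0 otherwise. */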
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}

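/* Allocate the global mapcache and size it according to RLIMIT_AS: running
 * as root, raise the limit to infinity and use the full MCACHE_MAX_SIZE;
 * otherwise raise the soft limit to the hard limit and cap the cache so
 * that NON_MCACHE_MEMORY_SIZE is left over for the rest of QEMU. */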
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;

    QTAILQ_INIT(&mapcache->locked_entries);
    mapcache->last_address_index = -1;

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    /* nr_buckets = ceil(max_mcache_size / MCACHE_BUCKET_SIZE) */
    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}

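/* (Re)map one bucket of guest frames into 'entry'. Any previous mapping is
 * munmap()ed first; the new one is created with xc_map_foreign_bulk() and
 * per-page mapping failures are recorded in entry->valid_mapping. */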
static void xen_remap_bucket(MapCacheEntry *entry,
                             hwaddr size,
                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    if (entry->valid_mapping != NULL) {
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}

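/* Map guest physical address 'phys_addr' and return a pointer usable by
 * QEMU. With size == 0 the result covers a single bucket and is only valid
 * until the next unlocked call; with lock != 0 the mapping is refcounted
 * and stays valid until xen_invalidate_map_cache_entry() is called on it.
 *
 * A hypothetical caller sketch (names invented for illustration):
 *
 *     uint8_t *p = xen_map_cache(gpa, len, 1);    // locked mapping
 *     if (p) {
 *         memcpy(p, buf, len);                    // use the mapping
 *         xen_invalidate_map_cache_entry(p);      // drop the lock
 *     }
 */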
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr __size = size;
    bool translated = false;

tryagain:
    address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    if (address_index == mapcache->last_address_index && !lock && !__size) {
        trace_xen_map_cache_return(mapcache->last_address_vaddr + address_offset);
        return mapcache->last_address_vaddr + address_offset;
    }

    /* Round __size up so that it is always a multiple of MCACHE_BUCKET_SIZE. */
    if (size) {
        __size = size + address_offset;
        if (__size % MCACHE_BUCKET_SIZE) {
            __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
        }
    } else {
        __size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != __size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, __size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != __size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, __size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_address_index = -1;
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_address_index = address_index;
    mapcache->last_address_vaddr = entry->vaddr_base;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
        reventry->paddr_index = mapcache->last_address_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_address_vaddr + address_offset);
    return mapcache->last_address_vaddr + address_offset;
}

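/* Translate a pointer previously returned by a locked xen_map_cache() call
 * back into its guest RAM address. Aborts if 'ptr' is not a currently
 * locked mapping. */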
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        return 0;
    }
    return (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
        ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
}

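/* Release one lock reference on the mapping that returned 'buffer'. When
 * the last reference is dropped, chained (non-head) entries are unmapped
 * and freed; the head entry of each bucket is kept for reuse. */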
void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_address_index == paddr_index) {
        mapcache->last_address_index = -1;
        mapcache->last_address_vaddr = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}

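/* Unmap every unlocked bucket, e.g. after the guest changes its physical
 * memory layout. Pending block-layer AIO is drained first since in-flight
 * requests may hold pointers into the mapcache. */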
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    mapcache_lock();

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_address_index = -1;
    mapcache->last_address_vaddr = NULL;

    mapcache_unlock();
}