Revision 432d268c ("xen: Introduce the Xen mapcache")

b/Makefile.target
 CONFIG_NO_XEN = y
 endif
 # xen support
+CONFIG_NO_XEN_MAPCACHE = $(if $(subst n,,$(CONFIG_XEN_MAPCACHE)),n,y)
 obj-i386-$(CONFIG_XEN) += xen-all.o
 obj-$(CONFIG_NO_XEN) += xen-stub.o
+obj-i386-$(CONFIG_XEN_MAPCACHE) += xen-mapcache.o
+obj-$(CONFIG_NO_XEN_MAPCACHE) += xen-mapcache-stub.o
 
 # Inter-VM PCI shared memory
 CONFIG_IVSHMEM =
b/configure
   i386|x86_64)
     if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then
       echo "CONFIG_XEN=y" >> $config_target_mak
+      if test "$cpu" = "i386" -o "$cpu" = "x86_64"; then
+          echo "CONFIG_XEN_MAPCACHE=y" >> $config_target_mak
+      fi
     fi
 esac
 case "$target_arch2" in
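Note on the build wiring above: the Make expression $(if $(subst n,,$(CONFIG_XEN_MAPCACHE)),n,y) simply inverts the flag. With CONFIG_XEN_MAPCACHE=y, $(subst n,,y) is non-empty, so the $(if ...) yields n; with the variable unset or set to n it yields y. Each target therefore links exactly one of xen-mapcache.o and xen-mapcache-stub.o. The configure hunk emits CONFIG_XEN_MAPCACHE=y only when the host cpu is i386 or x86_64, which matches xen-mapcache.c further down: it defines MCACHE_BUCKET_SHIFT only for __i386__ and __x86_64__, so every other host falls back to the stub.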
b/exec.c
 #include "hw/qdev.h"
 #include "osdep.h"
 #include "kvm.h"
+#include "hw/xen.h"
 #include "qemu-timer.h"
 #if defined(CONFIG_USER_ONLY)
 #include <qemu.h>
......
 #include <libutil.h>
 #endif
 #endif
+#else /* !CONFIG_USER_ONLY */
+#include "xen-mapcache.h"
 #endif
 
 //#define DEBUG_TB_INVALIDATE
......
         }
     }
 
+    new_block->offset = find_ram_offset(size);
     if (host) {
         new_block->host = host;
         new_block->flags |= RAM_PREALLOC_MASK;
......
                                    PROT_EXEC|PROT_READ|PROT_WRITE,
                                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 #else
-            new_block->host = qemu_vmalloc(size);
+            if (xen_mapcache_enabled()) {
+                xen_ram_alloc(new_block->offset, size);
+            } else {
+                new_block->host = qemu_vmalloc(size);
+            }
 #endif
             qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
         }
     }
-
-    new_block->offset = find_ram_offset(size);
     new_block->length = size;
 
     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
......
 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
                 munmap(block->host, block->length);
 #else
-                qemu_vfree(block->host);
+                if (xen_mapcache_enabled()) {
+                    qemu_invalidate_entry(block->host);
+                } else {
+                    qemu_vfree(block->host);
+                }
 #endif
             }
             qemu_free(block);
......
                 QLIST_REMOVE(block, next);
                 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
             }
+            if (xen_mapcache_enabled()) {
+                /* We need to check if the requested address is in RAM
+                 * because we don't want to map the entire memory in QEMU.
+                 */
+                if (block->offset == 0) {
+                    return qemu_map_cache(addr, 0, 1);
+                } else if (block->host == NULL) {
+                    block->host = xen_map_block(block->offset, block->length);
+                }
+            }
             return block->host + (addr - block->offset);
         }
     }
......
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr - block->offset < block->length) {
+            if (xen_mapcache_enabled()) {
+                /* We need to check if the requested address is in RAM
+                 * because we don't want to map the entire memory in QEMU.
+                 */
+                if (block->offset == 0) {
+                    return qemu_map_cache(addr, 0, 1);
+                } else if (block->host == NULL) {
+                    block->host = xen_map_block(block->offset, block->length);
+                }
+            }
             return block->host + (addr - block->offset);
         }
     }
......
     uint8_t *host = ptr;
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
+        /* This case happens when the block is not mapped. */
+        if (block->host == NULL) {
+            continue;
+        }
         if (host - block->host < block->length) {
             *ram_addr = block->offset + (host - block->host);
             return 0;
         }
     }
+
+    if (xen_mapcache_enabled()) {
+        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
+        return 0;
+    }
+
     return -1;
 }
 
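The two exec.c lookup hunks above share one pattern; the following condensation (a sketch, not a function in the patch, but using exec.c's names) shows what a Xen build now does per RAMBlock:

    static void *xen_block_ptr(RAMBlock *block, ram_addr_t addr)
    {
        if (block->offset == 0) {
            /* Offset 0 is the "xen.ram" block registered by xen_ram_init():
             * guest RAM is never mapped wholesale, it is demand-mapped in
             * buckets through the map cache (lock=1 pins the bucket). */
            return qemu_map_cache(addr, 0, 1);
        }
        if (block->host == NULL) {
            /* Any other block is foreign-mapped once, on first use. */
            block->host = xen_map_block(block->offset, block->length);
        }
        return block->host + (addr - block->offset);
    }

This is also why find_ram_offset() moved ahead of the host allocation: xen_ram_alloc(new_block->offset, size) needs the block's offset before any host mapping exists.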
b/hw/xen.h
 #endif
 }
 
+static inline int xen_mapcache_enabled(void)
+{
+#ifdef CONFIG_XEN_MAPCACHE
+    return xen_enabled();
+#else
+    return 0;
+#endif
+}
+
 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num);
 void xen_piix3_set_irq(void *opaque, int irq_num, int level);
 void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len);
......
 int xen_hvm_init(void);
 void xen_vcpu_init(void);
 
+#if defined(NEED_CPU_H) && !defined(CONFIG_USER_ONLY)
+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size);
+#endif
+
 #if defined(CONFIG_XEN) && CONFIG_XEN_CTRL_INTERFACE_VERSION < 400
 #  define HVM_MAX_VCPUS 32
 #endif
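xen_mapcache_enabled() is the compile-time and run-time switch that the exec.c hunks test. A minimal sketch (hypothetical caller, not from the patch) of why it is an inline rather than a plain flag:

    /* With CONFIG_XEN_MAPCACHE undefined, xen_mapcache_enabled() is a
     * constant 0: an optimizing compiler folds the branch away, and the
     * stub objects still provide qemu_map_cache()/xen_map_block() so
     * that unoptimized builds link as well. */
    void *example_lookup(ram_addr_t addr)
    {
        if (xen_mapcache_enabled()) {      /* folds to if (0) */
            return qemu_map_cache(addr, 0, 1);
        }
        return qemu_get_ram_ptr(addr);
    }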
b/hw/xen_common.h
 }
 
 
+static inline int xc_domain_populate_physmap_exact
+    (XenXC xc_handle, uint32_t domid, unsigned long nr_extents,
+     unsigned int extent_order, unsigned int mem_flags, xen_pfn_t *extent_start)
+{
+    return xc_domain_memory_populate_physmap
+        (xc_handle, domid, nr_extents, extent_order, mem_flags, extent_start);
+}
+
+
 /* Xen 4.1 */
 #else
 
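This wrapper sits in the pre-4.1 branch of xen_common.h (note the /* Xen 4.1 */ #else that follows): Xen 4.1's libxc provides xc_domain_populate_physmap_exact() natively, so defining it here in terms of the older xc_domain_memory_populate_physmap() lets xen-all.c use a single calling convention against both library generations.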
b/trace-events
 # hw/milkymist-vgafb.c
 disable milkymist_vgafb_memory_read(uint32_t addr, uint32_t value) "addr %08x value %08x"
 disable milkymist_vgafb_memory_write(uint32_t addr, uint32_t value) "addr %08x value %08x"
+
+# xen-all.c
+disable xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx"
+
+# xen-mapcache.c
+disable qemu_map_cache(uint64_t phys_addr) "want %#"PRIx64""
+disable qemu_remap_bucket(uint64_t index) "index %#"PRIx64""
+disable qemu_map_cache_return(void* ptr) "%p"
+disable xen_map_block(uint64_t phys_addr, uint64_t size) "%#"PRIx64", size %#"PRIx64""
+disable xen_unmap_block(void* addr, unsigned long size) "%p, size %#lx"
b/xen-all.c
 #include "hw/xen_common.h"
 #include "hw/xen_backend.h"
 
+#include "xen-mapcache.h"
+#include "trace.h"
+
 /* Xen specific function for piix pci */
 
 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
......
     return qemu_allocate_irqs(xen_set_irq, NULL, 16);
 }
 
+/* Memory Ops */
+
+static void xen_ram_init(ram_addr_t ram_size)
+{
+    RAMBlock *new_block;
+    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;
+
+    new_block = qemu_mallocz(sizeof (*new_block));
+    pstrcpy(new_block->idstr, sizeof (new_block->idstr), "xen.ram");
+    new_block->host = NULL;
+    new_block->offset = 0;
+    new_block->length = ram_size;
+
+    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+
+    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
+                                       new_block->length >> TARGET_PAGE_BITS);
+    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
+           0xff, new_block->length >> TARGET_PAGE_BITS);
+
+    if (ram_size >= 0xe0000000) {
+        above_4g_mem_size = ram_size - 0xe0000000;
+        below_4g_mem_size = 0xe0000000;
+    } else {
+        below_4g_mem_size = ram_size;
+    }
+
+    cpu_register_physical_memory(0, below_4g_mem_size, new_block->offset);
+#if TARGET_PHYS_ADDR_BITS > 32
+    if (above_4g_mem_size > 0) {
+        cpu_register_physical_memory(0x100000000ULL, above_4g_mem_size,
+                                     new_block->offset + below_4g_mem_size);
+    }
+#endif
+}
+
+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
+{
+    unsigned long nr_pfn;
+    xen_pfn_t *pfn_list;
+    int i;
+
+    trace_xen_ram_alloc(ram_addr, size);
+
+    nr_pfn = size >> TARGET_PAGE_BITS;
+    pfn_list = qemu_malloc(sizeof (*pfn_list) * nr_pfn);
+
+    for (i = 0; i < nr_pfn; i++) {
+        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
+    }
+
+    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
+        hw_error("xen: failed to populate ram at %lx", ram_addr);
+    }
+
+    qemu_free(pfn_list);
+}
+
+
 /* VCPU Operations, MMIO, IO ring ... */
 
 static void xen_reset_vcpu(void *opaque)
......
 
 int xen_hvm_init(void)
 {
+    /* Init RAM management */
+    qemu_map_cache_init();
+    xen_ram_init(ram_size);
+
     return 0;
 }
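A worked example of the two calculations above, assuming an illustrative 4 GiB guest and the usual x86 TARGET_PAGE_BITS of 12 (neither value is fixed by this patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t ram_size = 0x100000000ULL;      /* 4 GiB guest */
        uint64_t below_4g, above_4g = 0;

        if (ram_size >= 0xe0000000) {            /* same threshold as xen_ram_init() */
            above_4g = ram_size - 0xe0000000;    /* 0x20000000 = 512 MiB */
            below_4g = 0xe0000000;               /* 3.5 GiB */
        } else {
            below_4g = ram_size;
        }

        /* xen_ram_alloc() then asks Xen for one pfn per 4 KiB page: */
        unsigned long nr_pfn = (unsigned long)(ram_size >> 12);

        printf("below 4G: %#llx, above 4G: %#llx, pfns: %lu\n",
               (unsigned long long)below_4g,
               (unsigned long long)above_4g, nr_pfn);  /* pfns: 1048576 */
        return 0;
    }

The 0xe0000000 split mirrors pc.c's below-4G layout, leaving a 512 MiB hole under 4 GiB for PCI and MMIO.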
b/xen-mapcache-stub.c
+/*
+ * Copyright (C) 2011       Citrix Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "config.h"
+
+#include "exec-all.h"
+#include "qemu-common.h"
+#include "cpu-common.h"
+#include "xen-mapcache.h"
+
+void qemu_map_cache_init(void)
+{
+}
+
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
+{
+    return qemu_get_ram_ptr(phys_addr);
+}
+
+void qemu_map_cache_unlock(void *buffer)
+{
+}
+
+ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
+{
+    return -1;
+}
+
+void qemu_invalidate_map_cache(void)
+{
+}
+
+void qemu_invalidate_entry(uint8_t *buffer)
+{
+}
+uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size)
+{
+    return NULL;
+}
b/xen-mapcache.c
+/*
+ * Copyright (C) 2011       Citrix Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "config.h"
+
+#include <sys/resource.h>
+
+#include "hw/xen_backend.h"
+#include "blockdev.h"
+
+#include <xen/hvm/params.h>
+#include <sys/mman.h>
+
+#include "xen-mapcache.h"
+#include "trace.h"
+
+
+//#define MAPCACHE_DEBUG
+
+#ifdef MAPCACHE_DEBUG
+#  define DPRINTF(fmt, ...) do { \
+    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
+} while (0)
+#else
+#  define DPRINTF(fmt, ...) do { } while (0)
+#endif
+
+#if defined(__i386__)
+#  define MCACHE_BUCKET_SHIFT 16
+#elif defined(__x86_64__)
+#  define MCACHE_BUCKET_SHIFT 20
+#endif
+#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]
+
+typedef struct MapCacheEntry {
+    target_phys_addr_t paddr_index;
+    uint8_t *vaddr_base;
+    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
+    uint8_t lock;
+    struct MapCacheEntry *next;
+} MapCacheEntry;
+
+typedef struct MapCacheRev {
+    uint8_t *vaddr_req;
+    target_phys_addr_t paddr_index;
+    QTAILQ_ENTRY(MapCacheRev) next;
+} MapCacheRev;
+
+typedef struct MapCache {
+    MapCacheEntry *entry;
+    unsigned long nr_buckets;
+    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;
+
+    /* For most cases (>99.9%), the page address is the same. */
+    target_phys_addr_t last_address_index;
+    uint8_t *last_address_vaddr;
+    unsigned long max_mcache_size;
+    unsigned int mcache_bucket_shift;
+} MapCache;
+
+static MapCache *mapcache;
+
+static inline int test_bit(unsigned int bit, const unsigned long *map)
+{
+    return !!((map)[(bit) / BITS_PER_LONG] & (1UL << ((bit) % BITS_PER_LONG)));
+}
+
+void qemu_map_cache_init(void)
+{
+    unsigned long size;
+    struct rlimit rlimit_as;
+
+    mapcache = qemu_mallocz(sizeof (MapCache));
+
+    QTAILQ_INIT(&mapcache->locked_entries);
+    mapcache->last_address_index = -1;
+
+    getrlimit(RLIMIT_AS, &rlimit_as);
+    rlimit_as.rlim_cur = rlimit_as.rlim_max;
+    setrlimit(RLIMIT_AS, &rlimit_as);
+    mapcache->max_mcache_size = rlimit_as.rlim_max;
+
+    mapcache->nr_buckets =
+        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
+          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
+         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
+
+    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
+    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
+    DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache->nr_buckets, size);
+    mapcache->entry = qemu_mallocz(size);
+}
+
+static void qemu_remap_bucket(MapCacheEntry *entry,
+                              target_phys_addr_t size,
+                              target_phys_addr_t address_index)
+{
+    uint8_t *vaddr_base;
+    xen_pfn_t *pfns;
+    int *err;
+    unsigned int i, j;
+    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;
+
+    trace_qemu_remap_bucket(address_index);
+
+    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
+    err = qemu_mallocz(nb_pfn * sizeof (int));
+
+    if (entry->vaddr_base != NULL) {
+        if (munmap(entry->vaddr_base, size) != 0) {
+            perror("unmap fails");
+            exit(-1);
+        }
+    }
+
+    for (i = 0; i < nb_pfn; i++) {
+        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
+    }
+
+    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
+                                     pfns, err, nb_pfn);
+    if (vaddr_base == NULL) {
+        perror("xc_map_foreign_bulk");
+        exit(-1);
+    }
+
+    entry->vaddr_base = vaddr_base;
+    entry->paddr_index = address_index;
+
+    for (i = 0; i < nb_pfn; i += BITS_PER_LONG) {
+        unsigned long word = 0;
+        if ((i + BITS_PER_LONG) > nb_pfn) {
+            j = nb_pfn % BITS_PER_LONG;
+        } else {
+            j = BITS_PER_LONG;
+        }
+        while (j > 0) {
+            word = (word << 1) | !err[i + --j];
+        }
+        entry->valid_mapping[i / BITS_PER_LONG] = word;
+    }
+
+    qemu_free(pfns);
+    qemu_free(err);
+}
+
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
+{
+    MapCacheEntry *entry, *pentry = NULL;
+    target_phys_addr_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
+    target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
+
+    trace_qemu_map_cache(phys_addr);
+
+    if (address_index == mapcache->last_address_index && !lock) {
+        trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
+        return mapcache->last_address_vaddr + address_offset;
+    }
+
+    entry = &mapcache->entry[address_index % mapcache->nr_buckets];
+
+    while (entry && entry->lock && entry->paddr_index != address_index && entry->vaddr_base) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        entry = qemu_mallocz(sizeof (MapCacheEntry));
+        pentry->next = entry;
+        qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
+    } else if (!entry->lock) {
+        if (!entry->vaddr_base || entry->paddr_index != address_index ||
+            !test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
+            qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
+        }
+    }
+
+    if (!test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
+        mapcache->last_address_index = -1;
+        trace_qemu_map_cache_return(NULL);
+        return NULL;
+    }
+
+    mapcache->last_address_index = address_index;
+    mapcache->last_address_vaddr = entry->vaddr_base;
+    if (lock) {
+        MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
+        entry->lock++;
+        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
+        reventry->paddr_index = mapcache->last_address_index;
+        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
+    }
+
+    trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
+    return mapcache->last_address_vaddr + address_offset;
+}
+
+ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
+{
+    MapCacheRev *reventry;
+    target_phys_addr_t paddr_index;
+    int found = 0;
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        if (reventry->vaddr_req == ptr) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        fprintf(stderr, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr);
+        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
+                    reventry->vaddr_req);
+        }
+        abort();
+        return 0;
+    }
+
+    return paddr_index << MCACHE_BUCKET_SHIFT;
+}
+
+void qemu_invalidate_entry(uint8_t *buffer)
+{
+    MapCacheEntry *entry = NULL, *pentry = NULL;
+    MapCacheRev *reventry;
+    target_phys_addr_t paddr_index;
+    int found = 0;
+
+    if (mapcache->last_address_vaddr == buffer) {
+        mapcache->last_address_index = -1;
+    }
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        if (reventry->vaddr_req == buffer) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer);
+        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
+        }
+        return;
+    }
+    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+    qemu_free(reventry);
+
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && entry->paddr_index != paddr_index) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
+        return;
+    }
+    entry->lock--;
+    if (entry->lock > 0 || pentry == NULL) {
+        return;
+    }
+
+    pentry->next = entry->next;
+    if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
+        perror("unmap fails");
+        exit(-1);
+    }
+    qemu_free(entry);
+}
+
+void qemu_invalidate_map_cache(void)
+{
+    unsigned long i;
+    MapCacheRev *reventry;
+
+    /* Flush pending AIO before destroying the mapcache */
+    qemu_aio_flush();
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        DPRINTF("There should be no locked mappings at this time, "
+                "but "TARGET_FMT_plx" -> %p is present\n",
+                reventry->paddr_index, reventry->vaddr_req);
+    }
+
+    mapcache_lock();
+
+    for (i = 0; i < mapcache->nr_buckets; i++) {
+        MapCacheEntry *entry = &mapcache->entry[i];
+
+        if (entry->vaddr_base == NULL) {
+            continue;
+        }
+
+        if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
+            perror("unmap fails");
+            exit(-1);
+        }
+
+        entry->paddr_index = 0;
+        entry->vaddr_base = NULL;
+    }
+
+    mapcache->last_address_index = -1;
+    mapcache->last_address_vaddr = NULL;
+
+    mapcache_unlock();
+}
+
+uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size)
+{
+    uint8_t *vaddr_base;
+    xen_pfn_t *pfns;
+    int *err;
+    unsigned int i;
+    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;
+
+    trace_xen_map_block(phys_addr, size);
+    phys_addr >>= XC_PAGE_SHIFT;
+
+    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
+    err = qemu_mallocz(nb_pfn * sizeof (int));
+
+    for (i = 0; i < nb_pfn; i++) {
+        pfns[i] = phys_addr + i;
+    }
+
+    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
+                                     pfns, err, nb_pfn);
+    if (vaddr_base == NULL) {
+        perror("xc_map_foreign_bulk");
+        exit(-1);
+    }
+
+    qemu_free(pfns);
+    qemu_free(err);
+
+    return vaddr_base;
+}
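A worked example of the bucket arithmetic, using the x86_64 constants from this file (MCACHE_BUCKET_SHIFT 20, i.e. 1 MiB buckets of 256 4-KiB frames, so each bucket's valid_mapping bitmap is 256 bits, four longs on a 64-bit host); the address is arbitrary:

    #include <assert.h>
    #include <stdint.h>

    #define MCACHE_BUCKET_SHIFT 20
    #define MCACHE_BUCKET_SIZE  (1UL << MCACHE_BUCKET_SHIFT)
    #define XC_PAGE_SHIFT       12

    int main(void)
    {
        uint64_t phys_addr = 0x43210987;

        /* the same split qemu_map_cache() performs */
        uint64_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
        uint64_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
        unsigned page_in_bucket = (unsigned)(address_offset >> XC_PAGE_SHIFT);

        assert(address_index == 0x432);    /* which bucket */
        assert(address_offset == 0x10987); /* offset inside the bucket */
        assert(page_in_bucket == 0x10);    /* which valid_mapping bit */
        return 0;
    }

Because xc_map_foreign_bulk() reports errors per page, a single unmappable frame only clears one valid_mapping bit; qemu_map_cache() then returns NULL just for addresses landing on that page, not for the whole bucket.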
b/xen-mapcache.h
+/*
+ * Copyright (C) 2011       Citrix Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef XEN_MAPCACHE_H
+#define XEN_MAPCACHE_H
+
+#include <sys/mman.h>
+#include "trace.h"
+
+void     qemu_map_cache_init(void);
+uint8_t  *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock);
+void     qemu_map_cache_unlock(void *phys_addr);
+ram_addr_t qemu_ram_addr_from_mapcache(void *ptr);
+void     qemu_invalidate_entry(uint8_t *buffer);
+void     qemu_invalidate_map_cache(void);
+
+uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size);
+
+static inline void xen_unmap_block(void *addr, ram_addr_t size)
+{
+    trace_xen_unmap_block(addr, size);
+
+    if (munmap(addr, size) != 0) {
+        hw_error("xen_unmap_block: %s", strerror(errno));
+    }
+}
+
+
+#define mapcache_lock()   ((void)0)
+#define mapcache_unlock() ((void)0)
+
+#endif /* !XEN_MAPCACHE_H */
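A hypothetical caller (not in the patch), sketching the lock discipline the header implies: lock != 0 pins a bucket and records the mapping in locked_entries until qemu_invalidate_entry() releases it, while lock == 0 returns a transient pointer that a later qemu_map_cache() call may remap. The in-tree callers pass size 0, which qemu_map_cache() expands to one full bucket:

    #include "xen-mapcache.h"

    static void mapcache_usage_sketch(target_phys_addr_t guest_paddr)
    {
        uint8_t *p = qemu_map_cache(guest_paddr, 0, 1);  /* locked */
        if (p == NULL) {
            return;  /* the page under guest_paddr failed to foreign-map */
        }
        /* ... safe to use p across further qemu_map_cache() calls ... */
        qemu_invalidate_entry(p);                        /* drops the lock */
    }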
b/xen-stub.c
 {
 }
 
+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
+{
+}
+
 qemu_irq *xen_interrupt_controller_init(void)
 {
     return NULL;
