Revision 7267c094 xen-mapcache.c

--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -87,7 +87,7 @@
     unsigned long size;
     struct rlimit rlimit_as;
 
-    mapcache = qemu_mallocz(sizeof (MapCache));
+    mapcache = g_malloc0(sizeof (MapCache));
 
     QTAILQ_INIT(&mapcache->locked_entries);
     mapcache->last_address_index = -1;
@@ -111,7 +111,7 @@
     size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
     DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
             mapcache->nr_buckets, size);
-    mapcache->entry = qemu_mallocz(size);
+    mapcache->entry = g_malloc0(size);
 }
 
 static void xen_remap_bucket(MapCacheEntry *entry,
@@ -126,8 +126,8 @@
 
     trace_xen_remap_bucket(address_index);
 
-    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
-    err = qemu_mallocz(nb_pfn * sizeof (int));
+    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
+    err = g_malloc0(nb_pfn * sizeof (int));
 
     if (entry->vaddr_base != NULL) {
         if (munmap(entry->vaddr_base, entry->size) != 0) {
@@ -136,7 +136,7 @@
         }
     }
     if (entry->valid_mapping != NULL) {
-        qemu_free(entry->valid_mapping);
+        g_free(entry->valid_mapping);
         entry->valid_mapping = NULL;
     }
 
@@ -154,7 +154,7 @@
     entry->vaddr_base = vaddr_base;
     entry->paddr_index = address_index;
     entry->size = size;
-    entry->valid_mapping = (unsigned long *) qemu_mallocz(sizeof(unsigned long) *
+    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
             BITS_TO_LONGS(size >> XC_PAGE_SHIFT));
 
     bitmap_zero(entry->valid_mapping, nb_pfn);
@@ -164,8 +164,8 @@
         }
     }
 
-    qemu_free(pfns);
-    qemu_free(err);
+    g_free(pfns);
+    g_free(err);
 }
 
 uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
@@ -201,7 +201,7 @@
         entry = entry->next;
     }
     if (!entry) {
-        entry = qemu_mallocz(sizeof (MapCacheEntry));
+        entry = g_malloc0(sizeof (MapCacheEntry));
         pentry->next = entry;
         xen_remap_bucket(entry, __size, address_index);
     } else if (!entry->lock) {
@@ -223,7 +223,7 @@
     mapcache->last_address_index = address_index;
     mapcache->last_address_vaddr = entry->vaddr_base;
     if (lock) {
-        MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
+        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
         entry->lock++;
         reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
         reventry->paddr_index = mapcache->last_address_index;
@@ -301,7 +301,7 @@
         return;
     }
     QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
-    qemu_free(reventry);
+    g_free(reventry);
 
     entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
     while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
@@ -322,8 +322,8 @@
         perror("unmap fails");
         exit(-1);
     }
-    qemu_free(entry->valid_mapping);
-    qemu_free(entry);
+    g_free(entry->valid_mapping);
+    g_free(entry);
 }
 
 void xen_invalidate_map_cache(void)
@@ -357,7 +357,7 @@
         entry->paddr_index = 0;
         entry->vaddr_base = NULL;
         entry->size = 0;
-        qemu_free(entry->valid_mapping);
+        g_free(entry->valid_mapping);
         entry->valid_mapping = NULL;
     }
 

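Note: the revision is a mechanical substitution of QEMU's old allocation wrappers, qemu_mallocz()/qemu_free(), with their GLib counterparts, g_malloc0()/g_free(), keeping the zero-on-allocate behaviour. Below is a minimal standalone sketch of the same pattern, not taken from xen-mapcache.c; the DemoEntry struct and the build command are illustrative assumptions.

/* Standalone sketch of the g_malloc0()/g_free() pattern used in this revision.
 * Hypothetical DemoEntry stands in for MapCacheEntry.
 * Build (assumed): gcc sketch.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdio.h>

typedef struct DemoEntry {
    unsigned long *valid_mapping;
    size_t nb_longs;
} DemoEntry;

int main(void)
{
    /* g_malloc0() zero-fills the allocation, like the old qemu_mallocz(). */
    DemoEntry *entry = g_malloc0(sizeof(DemoEntry));
    entry->nb_longs = 4;
    entry->valid_mapping = g_malloc0(entry->nb_longs * sizeof(unsigned long));

    printf("valid_mapping[0] = %lu (zero-initialized)\n", entry->valid_mapping[0]);

    /* g_free() replaces qemu_free(); g_free(NULL) is a safe no-op. */
    g_free(entry->valid_mapping);
    g_free(entry);
    return 0;
}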