2 * Copyright 2012 GRNET S.A. All rights reserved.
4 * Redistribution and use in source and binary forms, with or
5 * without modification, are permitted provided that the following
8 * 1. Redistributions of source code must retain the above
9 * copyright notice, this list of conditions and the following
11 * 2. Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following
13 * disclaimer in the documentation and/or other materials
14 * provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
17 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
29 * The views and conclusions contained in the software and
30 * documentation are those of the authors and should not be
31 * interpreted as representing official policies, either expressed
32 * or implied, of GRNET S.A.
35 #include <xtypes/xheap.h>
36 #include <xtypes/domain.h>
38 // small allocations are considered those < 1 << (alignment_unit + SMALL_LIMIT)
40 // medium allocations are considered those < 1 << (alignment_unit + MEDIUM_LIMIT)
41 #define MEDIUM_LIMIT 10
// This (the -3) ensures that the space allocated beyond the requested
// bytes is less than 12.5% of the requested space.
45 #define MEDIUM_AL_UNIT (SMALL_LIMIT - 3)
46 #define LARGE_AL_UNIT (MEDIUM_LIMIT - 3)
 * Heap allocation sizes:
 * 0 - SMALL_LIMIT -> bytes rounded up to (1 << alignment_unit)
 * SMALL_LIMIT - MEDIUM_LIMIT -> bytes rounded up to (1 << (alignment_unit + MEDIUM_AL_UNIT))
 * MEDIUM_LIMIT - ... -> bytes rounded up to (1 << (alignment_unit + LARGE_AL_UNIT))
55 //aligned alloc bytes with header size
56 static inline uint64_t __get_alloc_bytes(struct xheap *xheap, uint64_t bytes)
58 if (bytes < 1<<(xheap->alignment_unit + SMALL_LIMIT))
59 return __align(bytes + sizeof(struct xheap_header),
60 xheap->alignment_unit);
61 else if (bytes < 1<<(xheap->alignment_unit + MEDIUM_LIMIT))
62 return __align(bytes + sizeof(struct xheap_header),
63 xheap->alignment_unit + MEDIUM_AL_UNIT);
65 return __align(bytes + sizeof(struct xheap_header),
66 xheap->alignment_unit + LARGE_AL_UNIT);
69 static inline struct xheap_header* __get_header(void *ptr)
71 return (struct xheap_header *) ((unsigned long)ptr - sizeof(struct xheap_header));
74 static inline int __get_index(struct xheap *heap, uint64_t bytes)
77 uint32_t alignment_unit = heap->alignment_unit;
78 bytes = __get_alloc_bytes(heap, bytes) - sizeof(struct xheap_header);
80 if (bytes < (1<<(alignment_unit + SMALL_LIMIT)))
81 r = bytes >> alignment_unit;
82 else if (bytes < (1 << (alignment_unit + MEDIUM_LIMIT))) {
83 r = (1 << SMALL_LIMIT);
84 //r -= (1 << (alignment_unit+SMALL_LIMIT)) / (1 << (alignment_unit + MEDIUM_AL_UNIT));
85 r -= (1 << (SMALL_LIMIT - MEDIUM_AL_UNIT));
86 //XSEGLOG("%u, %u, r %d\n",((1<<alignment_unit) * 32), (1 << (alignment_unit +2)), r);
87 r += (bytes >> (alignment_unit + MEDIUM_AL_UNIT));
90 r = (1 << SMALL_LIMIT) + (1 << MEDIUM_LIMIT);
91 r -= 1 << (SMALL_LIMIT - MEDIUM_AL_UNIT);
92 r -= 1 << (MEDIUM_LIMIT - (LARGE_AL_UNIT - MEDIUM_AL_UNIT));
93 r += bytes >> (alignment_unit + LARGE_AL_UNIT);
98 uint64_t xheap_get_chunk_size(void *ptr)
100 struct xheap_header *h = __get_header(ptr);
/* Allocate a chunk of at least `bytes` usable bytes from the heap.
 * The matching free list is consulted first; on a miss, fresh space is
 * carved from the unused tail of the heap (heap->cur). Returns the
 * user-visible pointer, or NULL on exhaustion.
 *
 * NOTE(review): several lines of this function are not visible in this
 * view (the free-list pop loop, declarations of `head`/`next`, and the
 * branch structure around them) — comments below describe only what the
 * visible code shows.
 */
void* xheap_allocate(struct xheap *heap, uint64_t bytes)
	struct xheap_header *h;
	/* free-list bucket for this request size */
	int r = __get_index(heap, bytes);
	/* mem is the base of the heap; all xptr values are offsets from it */
	void *mem = XPTR(&heap->mem), *addr = NULL;
	xptr *free_list = (xptr *) mem;
	/* remember the caller's request; `bytes` is rounded up below */
	uint64_t req_bytes = bytes;
	xlock_acquire(&heap->lock, 1);
	//printf("(r: %d) list[%x]: %lu\n", r, &free_list[r], list);
	/* sanity check: a list entry must point inside the used region */
	if (head > heap->cur) {
		XSEGLOG("invalid xptr %llu found in chunk lists\n", head);
	/* follow the embedded next pointer stored at the chunk itself */
	next = *(xptr *)(((unsigned long) mem) + head);
	// XSEGLOG("alloced %llu bytes from list %d\n", bytes, r);
	// printf("popped %llu out of list. list is now %llu\n", head, next);
	/* reuse the popped chunk */
	addr = (void *) (((unsigned long)mem) + head);
	/* free-list miss: carve a fresh, size-class-rounded chunk */
	bytes = __get_alloc_bytes(heap, bytes);
	// printf("before heap->cur: %llu\n", heap->cur);
	// printf("bytes: %llu\n", bytes);
	/* out of space — presumably bails out with addr == NULL */
	if (heap->cur + bytes > heap->size)
	/* user pointer starts right after the header at heap->cur */
	addr = (void *) (((unsigned long) mem) + heap->cur + sizeof(struct xheap_header));
	// printf("after heap->cur: %llu\n", heap->cur);
	/* initialize the header of the freshly carved chunk */
	h = (struct xheap_header *) (((unsigned long) mem) + heap->cur);
	h->size = bytes - sizeof(struct xheap_header);
	h->magic = 0xdeadbeaf;	/* guard value checked by xheap_free */
	XPTRSET(&h->heap, heap);
	xlock_release(&heap->lock);
	// printf("alloced: %lx (size: %llu) (xptr: %llu)\n", addr, __get_header(addr)->size,
	/* post-condition checks: the chunk must satisfy the original request */
	if (addr && xheap_get_chunk_size(addr) < req_bytes){
		XSEGLOG("requested %llu bytes but heap returned %llu",
				req_bytes, xheap_get_chunk_size(addr));
	/* ...and must be exactly the size-class payload for that request */
	if (addr && xheap_get_chunk_size(addr) != (__get_alloc_bytes(heap, req_bytes) -
				sizeof(struct xheap_header))) {
		XSEGLOG("allocated chunk size %llu, but it should be %llu (req_bytes %llu)",
			xheap_get_chunk_size(addr), __get_alloc_bytes(heap, req_bytes), req_bytes);
/* Push the chunk at `ptr` onto the given free list (LIFO).
 * The list stores xptr offsets relative to the heap base; the chunk's
 * own payload is reused to hold the embedded `next` link.
 *
 * NOTE(review): the statements that actually link the node in
 * (storing `cur` into *node and abs_ptr into *list) are not visible in
 * this view — confirm against the full source.
 */
static inline void __add_in_free_list(struct xheap *heap, xptr* list, void *ptr)
	void *mem = XPTR(&heap->mem);
	/* offset of the chunk from the heap base */
	xptr abs_ptr = (xptr) ((unsigned long)ptr - (unsigned long) mem);
	/* the chunk payload itself becomes the list node */
	xptr cur, *node = (xptr *) ptr;
	xlock_acquire(&heap->lock, 2);
	/* snapshot the current list head */
	cur = *(volatile xptr *)list;
	//printf("cur: %llu, next: %llu\n", cur, abs_ptr);
	//printf("next points to %llu\n", *(xptr *) ptr);
	xlock_release(&heap->lock);
/* Return a chunk previously obtained from xheap_allocate to its heap.
 * The owning heap is recovered from the chunk header, the header magic
 * is validated, and the chunk is pushed onto the free list matching its
 * size class.
 *
 * NOTE(review): the declarations of `mem`, `size`, `free_list`, `r` and
 * the early-return after the magic-check failure are not visible in
 * this view.
 */
void xheap_free(void *ptr)
	struct xheap_header *h = __get_header(ptr);
	/* the header records which heap owns this chunk */
	struct xheap *heap = XPTR(&h->heap);
	/* reject pointers that were not allocated here (or were corrupted) */
	if (h->magic != 0xdeadbeaf) {
		XSEGLOG("for ptr: %lx, magic %lx != 0xdeadbeaf", ptr, h->magic);
	mem = XPTR(&heap->mem);
	size = xheap_get_chunk_size(ptr);
	free_list = (xptr *) mem;
	/* same bucket mapping used by xheap_allocate */
	r = __get_index(heap, size);
	//printf("size: %llu, r: %d\n", size, r);
	__add_in_free_list(heap, &free_list[r], ptr);
	// printf("freed %lx (size: %llu)\n", ptr, __get_header(ptr)->size);
	// XSEGLOG("freed %llu bytes to list %d\n", size, r);
/* Initialize a heap over the memory region `mem` of `size` bytes.
 * The start of the region is reserved for the free-list index array
 * (one xptr per size bucket); heap->cur is advanced past it, page by
 * page, before any allocation can happen.
 *
 * NOTE(review): this view is missing several lines (declarations of
 * `r`, `i`, `free_list`; the statements setting heap->cur/heap->size;
 * the error returns; the list-clearing loop body; the final return),
 * so comments describe only the visible code.
 */
int xheap_init(struct xheap *heap, uint64_t size, uint32_t alignment_unit, void *mem)
	//int r = (sizeof(size)*8 - __builtin_clzl(size));
	/* align the region start; `diff` is the wasted prefix */
	void *al_mem = (void *) __align((unsigned long)mem, alignment_unit);
	uint64_t diff = (uint64_t) ((unsigned long)al_mem - (unsigned long)mem);
	uint64_t heap_page = 1 << alignment_unit;
	heap->alignment_unit = alignment_unit;
	XPTRSET(&heap->mem, mem);
	/* number of free-list buckets needed to index up to `size` */
	r = __get_index(heap, size);
	/* minimum alignment unit required */
	if (heap_page < sizeof(struct xheap_header))
	//if (heap_page < sizeof(xptr *) * r)
	/* make sure unused space in heap start can hold a header*/
	if (heap->cur < sizeof(struct xheap_header)) {
		heap->cur += heap_page;
	/* heap->cur ends up pointing at where the first header will go */
	heap->cur -= sizeof(struct xheap_header);
	/* make sure there is enough unused space in heap start to be
	 * used as an indexing array
	 */
	while (heap->cur < sizeof(xptr *) * r)
		heap->cur += heap_page;
	/* clean up index array */
	free_list = (xptr *) mem;
	for (i = 0; i < r; i++) {
	/* make sure there is at least one "heap_page" to allocate */
	if (heap->cur >= size - heap_page)
	xlock_release(&heap->lock);