1 #include <xtypes/xheap.h>
2 #include <xtypes/domain.h>
4 // small allocations are considered those < 1 << (alignment_unit + SMALL_LIMIT)
6 // medium allocations are considered those < 1 << (alignment_unit + MEDIUM_LIMIT)
7 #define MEDIUM_LIMIT 10
9 //The -3 ensures that the space allocated beyond the requested
10 //bytes is less than 12.5% of the requested space
11 #define MEDIUM_AL_UNIT (SMALL_LIMIT - 3)
12 #define LARGE_AL_UNIT (MEDIUM_LIMIT - 3)
15 * Heap allocation sizes:
16 * 0 - SMALL_LIMIT -> bytes round up to (1 << alignment_unit)
17 * SMALL_LIMIT - MEDIUM_LIMIT -> bytes round up to (1 << (alignment_unit+ MEDIUM_AL_UNIT))
18 * MEDIUM_LIMIT - ... -> bytes round up to ( 1 << (alignment_unit + LARGE_AL_UNIT) )
21 //aligned alloc bytes with header size
// Total chunk footprint for a request of `bytes`: the requested bytes plus
// the chunk header, rounded up to the alignment granularity of the request's
// size class (small / medium / large as described in the table above).
// NOTE(review): `1<<(...)` is plain int arithmetic — if alignment_unit plus
// the limit can reach 31 this shift overflows; consider 1ULL — TODO confirm.
22 static inline uint64_t __get_alloc_bytes(struct xheap *xheap, uint64_t bytes)
// Small class: round (bytes + header) up to 1 << alignment_unit.
24 if (bytes < 1<<(xheap->alignment_unit + SMALL_LIMIT))
25 return __align(bytes + sizeof(struct xheap_header),
26 xheap->alignment_unit);
// Medium class: coarser rounding, 1 << (alignment_unit + MEDIUM_AL_UNIT).
27 else if (bytes < 1<<(xheap->alignment_unit + MEDIUM_LIMIT))
28 return __align(bytes + sizeof(struct xheap_header),
29 xheap->alignment_unit + MEDIUM_AL_UNIT);
// Large class: coarsest rounding, 1 << (alignment_unit + LARGE_AL_UNIT).
31 return __align(bytes + sizeof(struct xheap_header),
32 xheap->alignment_unit + LARGE_AL_UNIT);
// Recover the chunk header from a user pointer: the header is stored
// immediately before the usable area handed out by xheap_allocate.
35 static inline struct xheap_header* __get_header(void *ptr)
37 return (struct xheap_header *) ((unsigned long)ptr - sizeof(struct xheap_header));
// Map an allocation size to its slot in the free-list index array that lives
// at the start of the heap. Small sizes get one slot per (1<<alignment_unit)
// step; medium and large sizes use progressively coarser steps, with their
// slot ranges offset past the slots consumed by the finer classes (the
// subtractions remove the slots the coarser granularity skips over).
40 static inline int __get_index(struct xheap *heap, uint64_t bytes)
43 uint32_t alignment_unit = heap->alignment_unit;
// Normalize to the usable chunk size: aligned footprint minus the header.
44 bytes = __get_alloc_bytes(heap, bytes) - sizeof(struct xheap_header);
// Small class: one slot per alignment-unit step.
46 if (bytes < (1<<(alignment_unit + SMALL_LIMIT)))
47 r = bytes >> alignment_unit;
// Medium class: base offset past the small slots, corrected for overlap.
48 else if (bytes < (1 << (alignment_unit + MEDIUM_LIMIT))) {
49 r = (1 << SMALL_LIMIT);
50 //r -= (1 << (alignment_unit+SMALL_LIMIT)) / (1 << (alignment_unit + MEDIUM_AL_UNIT));
51 r -= (1 << (SMALL_LIMIT - MEDIUM_AL_UNIT));
52 //XSEGLOG("%u, %u, r %d\n",((1<<alignment_unit) * 32), (1 << (alignment_unit +2)), r);
53 r += (bytes >> (alignment_unit + MEDIUM_AL_UNIT));
// Large class: base offset past small + medium slots, corrected for overlap.
56 r = (1 << SMALL_LIMIT) + (1 << MEDIUM_LIMIT);
57 r -= 1 << (SMALL_LIMIT - MEDIUM_AL_UNIT);
58 r -= 1 << (MEDIUM_LIMIT - (LARGE_AL_UNIT - MEDIUM_AL_UNIT));
59 r += bytes >> (alignment_unit + LARGE_AL_UNIT);
// Usable size of the chunk backing `ptr`, as recorded in its header by
// xheap_allocate (footprint minus header).
64 uint64_t xheap_get_chunk_size(void *ptr)
66 struct xheap_header *h = __get_header(ptr);
// Allocate at least `bytes` usable bytes from `heap`, serialized by the heap
// lock. Strategy: first try to pop a recycled chunk from the free list of the
// matching size class; otherwise carve a fresh chunk from the unused tail of
// the heap (bump allocation via heap->cur) and stamp its header. Returns a
// pointer to the usable area just past the header.
70 void* xheap_allocate(struct xheap *heap, uint64_t bytes)
72 struct xheap_header *h;
// Size-class slot for this request.
73 int r = __get_index(heap, bytes);
// `mem` is the heap base; the free-list index array lives at the base.
74 void *mem = XPTR(&heap->mem), *addr = NULL;
75 xptr *free_list = (xptr *) mem;
// Remember the caller's request for the post-allocation sanity checks below.
77 uint64_t req_bytes = bytes;
79 xlock_acquire(&heap->lock, 1);
82 //printf("(r: %d) list[%x]: %lu\n", r, &free_list[r], list);
// A free-list entry beyond heap->cur can never have been handed out: the
// list is corrupt.
85 if (head > heap->cur) {
86 XSEGLOG("invalid xptr %llu found in chunk lists\n", head);
// Pop: the first word of a free chunk stores the next list entry (xptr,
// heap-relative offset).
89 next = *(xptr *)(((unsigned long) mem) + head);
91 // XSEGLOG("alloced %llu bytes from list %d\n", bytes, r);
92 // printf("popped %llu out of list. list is now %llu\n", head, next);
93 addr = (void *) (((unsigned long)mem) + head);
// Free list empty: bump-allocate a fresh, size-class-aligned chunk.
97 bytes = __get_alloc_bytes(heap, bytes);
98 // printf("before heap->cur: %llu\n", heap->cur);
99 // printf("bytes: %llu\n", bytes);
// Out of space: the new chunk would run past the end of the heap.
100 if (heap->cur + bytes > heap->size)
102 addr = (void *) (((unsigned long) mem) + heap->cur + sizeof(struct xheap_header));
103 // printf("after heap->cur: %llu\n", heap->cur);
// Stamp the header at the chunk start: usable size, magic, owning heap.
104 h = (struct xheap_header *) (((unsigned long) mem) + heap->cur);
105 h->size = bytes - sizeof(struct xheap_header);
106 h->magic = 0xdeadbeaf;
107 XPTRSET(&h->heap, heap);
111 xlock_release(&heap->lock);
112 // printf("alloced: %lx (size: %llu) (xptr: %llu)\n", addr, __get_header(addr)->size,
// Sanity check 1: the chunk must satisfy the original request.
114 if (addr && xheap_get_chunk_size(addr) < req_bytes){
115 XSEGLOG("requested %llu bytes but heap returned %llu",
116 req_bytes, xheap_get_chunk_size(addr));
// Sanity check 2: the chunk must be exactly the size its class dictates
// (otherwise freeing it would file it under the wrong list).
119 if (addr && xheap_get_chunk_size(addr) != (__get_alloc_bytes(heap, req_bytes) -
120 sizeof(struct xheap_header))) {
121 XSEGLOG("allocated chunk size %llu, but it should be %llu (req_bytes %llu)",
122 xheap_get_chunk_size(addr), __get_alloc_bytes(heap, req_bytes), req_bytes);
// Push the chunk at `ptr` onto the free list headed by `list`, under the
// heap lock. The chunk itself stores the link: its first word is reused as
// the xptr (heap-relative offset) of the next free chunk.
128 static inline void __add_in_free_list(struct xheap *heap, xptr* list, void *ptr)
130 void *mem = XPTR(&heap->mem);
// Heap-relative offset of the chunk being freed.
131 xptr abs_ptr = (xptr) ((unsigned long)ptr - (unsigned long) mem);
132 xptr cur, *node = (xptr *) ptr;
134 xlock_acquire(&heap->lock, 2);
// Snapshot the current list head before linking in the new node.
136 cur = *(volatile xptr *)list;
139 //printf("cur: %llu, next: %llu\n", cur, abs_ptr);
140 //printf("next points to %llu\n", *(xptr *) ptr);
142 xlock_release(&heap->lock);
// Return a chunk to its owning heap (recovered from the chunk header) by
// filing it in the free list of its size class for reuse.
145 void xheap_free(void *ptr)
147 struct xheap_header *h = __get_header(ptr);
148 struct xheap *heap = XPTR(&h->heap);
// Guard against freeing a pointer that did not come from xheap_allocate
// (or whose header has been corrupted).
149 if (h->magic != 0xdeadbeaf) {
150 XSEGLOG("for ptr: %lx, magic %lx != 0xdeadbeaf", ptr, h->magic);
152 void *mem = XPTR(&heap->mem);
153 uint64_t size = xheap_get_chunk_size(ptr);
154 xptr *free_list = (xptr *) mem;
// The chunk was allocated at exactly its class size, so its recorded size
// maps back to the same free-list slot it was carved for.
155 int r = __get_index(heap, size);
156 //printf("size: %llu, r: %d\n", size, r);
157 __add_in_free_list(heap, &free_list[r], ptr);
158 // printf("freed %lx (size: %llu)\n", ptr, __get_header(ptr)->size);
159 // XSEGLOG("freed %llu bytes to list %d\n", size, r);
// Initialize a heap over the memory region `mem` of `size` bytes, with
// chunks aligned to 1 << alignment_unit. The start of the region is reserved
// for the free-list index array; heap->cur is advanced past it (and adjusted
// so the first chunk's header fits) before any allocation happens.
163 int xheap_init(struct xheap *heap, uint64_t size, uint32_t alignment_unit, void *mem)
165 //int r = (sizeof(size)*8 - __builtin_clzl(size));
// Align the region base up; `diff` is the alignment slack at the start.
167 void *al_mem = (void *) __align((unsigned long)mem, alignment_unit);
168 uint64_t diff = (uint64_t) ((unsigned long)al_mem - (unsigned long)mem);
// NOTE(review): `1 << alignment_unit` is int arithmetic — overflows for
// alignment_unit >= 31; consider UINT64_C(1) — TODO confirm intended range.
169 uint64_t heap_page = 1 << alignment_unit;
174 heap->alignment_unit = alignment_unit;
175 XPTRSET(&heap->mem, mem);
// Number of free-list slots needed: the index of the largest possible
// allocation (the whole heap) bounds the index array.
177 r = __get_index(heap, size);
179 /* minimum alignment unit required */
180 if (heap_page < sizeof(struct xheap_header))
182 //if (heap_page < sizeof(xptr *) * r)
185 /* make sure unused space in heap start can hold a header*/
186 if (heap->cur < sizeof(struct xheap_header)) {
187 heap->cur += heap_page;
// Back cur up by a header so the first chunk's header lands in the
// reserved slack rather than pushing the user data off-page.
189 heap->cur -= sizeof(struct xheap_header);
191 /* make sure there is enough unused space in heap start to be
192 * used as an indexing array
// Reserve whole pages until the free-list array (r slots) fits below cur.
194 while (heap->cur < sizeof(xptr *) * r)
195 heap->cur += heap_page;
197 /* clean up index array */
// Zero every free-list head: all lists start empty.
198 free_list = (xptr *) mem;
199 for (i = 0; i < r; i++) {
203 /* make sure there is at least one "heap_page" to allocate */
204 if (heap->cur >= size - heap_page)
206 xlock_release(&heap->lock);