/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <sys/sys_heap.h>
#include <kernel.h>
#include <string.h>
#include "heap.h"

static void *chunk_mem(struct z_heap *h, chunkid_t c)
{
	chunk_unit_t *buf = chunk_buf(h);
	uint8_t *ret = ((uint8_t *)&buf[c]) + chunk_header_bytes(h);

	CHECK(!(((uintptr_t)ret) & (big_heap(h) ? 7 : 3)));

	return ret;
}

static void free_list_remove_bidx(struct z_heap *h, chunkid_t c, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	CHECK(!chunk_used(h, c));
	CHECK(b->next != 0);
	CHECK(h->avail_buckets & (1 << bidx));

	if (next_free_chunk(h, c) == c) {
		/* this is the last chunk */
		h->avail_buckets &= ~(1 << bidx);
		b->next = 0;
	} else {
		chunkid_t first = prev_free_chunk(h, c),
			  second = next_free_chunk(h, c);

		b->next = second;
		set_next_free_chunk(h, first, second);
		set_prev_free_chunk(h, second, first);
	}
}

static void free_list_remove(struct z_heap *h, chunkid_t c)
{
	if (!solo_free_header(h, c)) {
		int bidx = bucket_idx(h, chunk_size(h, c));
		free_list_remove_bidx(h, c, bidx);
	}
}

static void free_list_add_bidx(struct z_heap *h, chunkid_t c, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	if (b->next == 0U) {
		CHECK((h->avail_buckets & (1 << bidx)) == 0);

		/* Empty list, first item */
		h->avail_buckets |= (1 << bidx);
		b->next = c;
		set_prev_free_chunk(h, c, c);
		set_next_free_chunk(h, c, c);
	} else {
		CHECK(h->avail_buckets & (1 << bidx));

		/* Insert before (!) the "next" pointer */
		chunkid_t second = b->next;
		chunkid_t first = prev_free_chunk(h, second);

		set_prev_free_chunk(h, c, first);
		set_next_free_chunk(h, c, second);
		set_next_free_chunk(h, first, c);
		set_prev_free_chunk(h, second, c);
	}
}

static void free_list_add(struct z_heap *h, chunkid_t c)
{
	if (!solo_free_header(h, c)) {
		int bidx = bucket_idx(h, chunk_size(h, c));
		free_list_add_bidx(h, c, bidx);
	}
}

/* Splits a chunk "lc" into a left chunk and a right chunk at "rc".
 * Leaves both chunks marked "free"
 */
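/*
 * For example, splitting a 10-unit chunk "lc" at rc == lc + 4 yields a
 * 4-unit left chunk and a 6-unit right chunk; the left-size fields of rc
 * and of the chunk to the right of rc are updated so that neighbours can
 * still be found in both directions.
 */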
static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
{
	CHECK(rc > lc);
	CHECK(rc - lc < chunk_size(h, lc));

	chunksz_t sz0 = chunk_size(h, lc);
	chunksz_t lsz = rc - lc;
	chunksz_t rsz = sz0 - lsz;

	set_chunk_size(h, lc, lsz);
	set_chunk_size(h, rc, rsz);
	set_left_chunk_size(h, rc, lsz);
	set_left_chunk_size(h, right_chunk(h, rc), rsz);
}

/* Does not modify free list */
static void merge_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
{
	chunksz_t newsz = chunk_size(h, lc) + chunk_size(h, rc);

	set_chunk_size(h, lc, newsz);
	set_left_chunk_size(h, right_chunk(h, rc), newsz);
}

static void free_chunk(struct z_heap *h, chunkid_t c)
{
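	/*
	 * Free chunks are coalesced eagerly, so each neighbour of "c" needs
	 * at most one merge and the free lists never hold two adjacent
	 * free chunks.
	 */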
	/* Merge with free right chunk? */
	if (!chunk_used(h, right_chunk(h, c))) {
		free_list_remove(h, right_chunk(h, c));
		merge_chunks(h, c, right_chunk(h, c));
	}

	/* Merge with free left chunk? */
	if (!chunk_used(h, left_chunk(h, c))) {
		free_list_remove(h, left_chunk(h, c));
		merge_chunks(h, left_chunk(h, c), c);
		c = left_chunk(h, c);
	}

	free_list_add(h, c);
}

/*
 * Return the closest chunk ID corresponding to given memory pointer.
 * Here "closest" is only meaningful in the context of sys_heap_aligned_alloc()
 * where the requested alignment might not always correspond to a chunk
 * header boundary.
 */
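/*
 * For instance (small-heap case, assuming CHUNK_UNIT == 8 and a 4-byte
 * header): any pointer 0..7 bytes past chunk c's data start still divides
 * down to c, so a small alignment gap left by sys_heap_aligned_alloc()
 * maps back to the correct chunk.
 */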
static chunkid_t mem_to_chunkid(struct z_heap *h, void *p)
{
	uint8_t *mem = p, *base = (uint8_t *)chunk_buf(h);
	return (mem - chunk_header_bytes(h) - base) / CHUNK_UNIT;
}

void sys_heap_free(struct sys_heap *heap, void *mem)
{
	if (mem == NULL) {
		return; /* ISO C free() semantics */
	}
	struct z_heap *h = heap->heap;
	chunkid_t c = mem_to_chunkid(h, mem);

	/*
	 * This should catch many double-free cases.
	 * This is cheap enough so let's do it all the time.
	 */
	__ASSERT(chunk_used(h, c),
		 "unexpected heap state (double-free?) for memory at %p", mem);

	/*
	 * Many common memory overflow cases can be caught cheaply by
	 * checking the header fields of this chunk and of the next one,
	 * which sit immediately before and after the freed memory.
	 */
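	/*
	 * Concretely: right_chunk() is derived from this chunk's size field
	 * (stored before the buffer), and left_chunk() of that neighbour is
	 * derived from its left-size field (stored just past the buffer),
	 * so a linear overflow normally breaks the identity below.
	 */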
	__ASSERT(left_chunk(h, right_chunk(h, c)) == c,
		 "corrupted heap bounds (buffer overflow?) for memory at %p",
		 mem);

	set_chunk_used(h, c, false);
	free_chunk(h, c);
}

static chunkid_t alloc_chunk(struct z_heap *h, chunksz_t sz)
{
	int bi = bucket_idx(h, sz);
	struct z_heap_bucket *b = &h->buckets[bi];

	CHECK(bi <= bucket_idx(h, h->end_chunk));

	/* First try a bounded count of items from the minimal bucket
	 * size.  These may not fit; trying (e.g.) three of them means
	 * that (assuming chunk sizes are evenly distributed[1]) we have
	 * a 7/8 chance of finding a match, thus keeping the number of
	 * such blocks consumed by allocation higher than the number of
	 * smaller blocks created by fragmenting larger ones.
	 *
	 * [1] In practice, they are never evenly distributed, of course.
	 * But even in pathological situations we still maintain our
	 * constant time performance and at worst see fragmentation waste
	 * on the order of the allocated block only.
	 */
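	/*
	 * (The 7/8 figure follows from the even-distribution assumption:
	 * each candidate in the bucket is large enough with probability
	 * about 1/2, so three tries all fail with probability
	 * (1/2)^3 = 1/8.)
	 */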
	if (b->next) {
		chunkid_t first = b->next;
		int i = CONFIG_SYS_HEAP_ALLOC_LOOPS;
		do {
			chunkid_t c = b->next;
			if (chunk_size(h, c) >= sz) {
				free_list_remove_bidx(h, c, bi);
				return c;
			}
			b->next = next_free_chunk(h, c);
			CHECK(b->next != 0);
		} while (--i && b->next != first);
	}

	/* Otherwise pick the smallest non-empty bucket guaranteed to
	 * fit and use that unconditionally.
	 */
	uint32_t bmask = h->avail_buckets & ~((1 << (bi + 1)) - 1);
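	/*
	 * For example, with bi == 2 the mask ~((1 << 3) - 1) clears bits
	 * 0..2, so bmask only keeps buckets 3 and up; the first chunk of
	 * any such bucket is large enough by construction.
	 */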

	if (bmask != 0U) {
		int minbucket = __builtin_ctz(bmask);
		chunkid_t c = h->buckets[minbucket].next;

		free_list_remove_bidx(h, c, minbucket);
		CHECK(chunk_size(h, c) >= sz);
		return c;
	}

	return 0;
}

void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
{
	struct z_heap *h = heap->heap;

	if (bytes == 0U || size_too_big(h, bytes)) {
		return NULL;
	}

	chunksz_t chunk_sz = bytes_to_chunksz(h, bytes);
	chunkid_t c = alloc_chunk(h, chunk_sz);
	if (c == 0U) {
		return NULL;
	}

	/* Split off remainder if any */
	if (chunk_size(h, c) > chunk_sz) {
		split_chunks(h, c, c + chunk_sz);
		free_list_add(h, c + chunk_sz);
	}

	set_chunk_used(h, c, true);
	return chunk_mem(h, c);
}

void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
{
	struct z_heap *h = heap->heap;
	size_t gap, rew;

	/*
	 * Split align and rewind values (if any).
	 * We allow for one bit of rewind in addition to the alignment
	 * value to efficiently accommodate z_heap_aligned_alloc().
	 * So if e.g. align = 0x28 (32 | 8) this means we align to a 32-byte
	 * boundary and then rewind 8 bytes.
	 */
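	/*
	 * Walking through the 0x28 example: rew = 0x28 & -0x28 = 0x8 (the
	 * lowest set bit), leaving align = 0x20, i.e. align to 32 bytes
	 * and then hand back a pointer rewound by 8 bytes.
	 */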
	rew = align & -align;
	if (align != rew) {
		align -= rew;
		gap = MIN(rew, chunk_header_bytes(h));
	} else {
		if (align <= chunk_header_bytes(h)) {
			return sys_heap_alloc(heap, bytes);
		}
		rew = 0;
		gap = chunk_header_bytes(h);
	}
	__ASSERT((align & (align - 1)) == 0, "align must be a power of 2");

	if (bytes == 0 || size_too_big(h, bytes)) {
		return NULL;
	}

	/*
	 * Find a free block that is guaranteed to fit.
	 * We over-allocate to account for alignment and then free
	 * the excess chunks afterwards.
	 */
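	/*
	 * Illustrative numbers (assuming a small heap with 4-byte headers):
	 * align == 64 gives gap == 4, so a 100-byte request asks for
	 * bytes + align - gap = 160 bytes worth of chunks, enough to
	 * guarantee a 64-byte-aligned position with 100 usable bytes
	 * after it somewhere inside the returned block.
	 */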
	chunksz_t padded_sz = bytes_to_chunksz(h, bytes + align - gap);
	chunkid_t c0 = alloc_chunk(h, padded_sz);

	if (c0 == 0) {
		return NULL;
	}
	uint8_t *mem = chunk_mem(h, c0);

	/* Align allocated memory */
	mem = (uint8_t *) ROUND_UP(mem + rew, align) - rew;
	chunk_unit_t *end = (chunk_unit_t *) ROUND_UP(mem + bytes, CHUNK_UNIT);

	/* Get corresponding chunks */
	chunkid_t c = mem_to_chunkid(h, mem);
	chunkid_t c_end = end - chunk_buf(h);
	CHECK(c >= c0 && c < c_end && c_end <= c0 + padded_sz);

	/* Split and free unused prefix */
	if (c > c0) {
		split_chunks(h, c0, c);
		free_list_add(h, c0);
	}

	/* Split and free unused suffix */
	if (right_chunk(h, c) > c_end) {
		split_chunks(h, c, c_end);
		free_list_add(h, c_end);
	}

	set_chunk_used(h, c, true);
	return mem;
}

void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
			       size_t align, size_t bytes)
{
	struct z_heap *h = heap->heap;

	/* special realloc semantics */
	if (ptr == NULL) {
		return sys_heap_aligned_alloc(heap, align, bytes);
	}
	if (bytes == 0) {
		sys_heap_free(heap, ptr);
		return NULL;
	}

	__ASSERT((align & (align - 1)) == 0, "align must be a power of 2");

	if (size_too_big(h, bytes)) {
		return NULL;
	}

	chunkid_t c = mem_to_chunkid(h, ptr);
	chunkid_t rc = right_chunk(h, c);
	size_t align_gap = (uint8_t *)ptr - (uint8_t *)chunk_mem(h, c);
	chunksz_t chunks_need = bytes_to_chunksz(h, bytes + align_gap);

	if (align && ((uintptr_t)ptr & (align - 1))) {
		/* ptr is not sufficiently aligned */
	} else if (chunk_size(h, c) == chunks_need) {
		/* We're good already */
		return ptr;
	} else if (chunk_size(h, c) > chunks_need) {
		/* Shrink in place, split off and free unused suffix */
		split_chunks(h, c, c + chunks_need);
		set_chunk_used(h, c, true);
		free_chunk(h, c + chunks_need);
		return ptr;
	} else if (!chunk_used(h, rc) &&
		   (chunk_size(h, c) + chunk_size(h, rc) >= chunks_need)) {
		/* Expand: split the right chunk and append */
		chunksz_t split_size = chunks_need - chunk_size(h, c);

		free_list_remove(h, rc);

		if (split_size < chunk_size(h, rc)) {
			split_chunks(h, rc, rc + split_size);
			free_list_add(h, rc + split_size);
		}

		merge_chunks(h, c, rc);
		set_chunk_used(h, c, true);
		return ptr;
	} else {
		;
	}

	/* Fallback: allocate and copy */
	void *ptr2 = sys_heap_aligned_alloc(heap, align, bytes);

	if (ptr2 != NULL) {
		size_t prev_size = chunksz_to_bytes(h, chunk_size(h, c)) - align_gap;

		memcpy(ptr2, ptr, MIN(prev_size, bytes));
		sys_heap_free(heap, ptr);
	}
	return ptr2;
}

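/*
 * Typical usage, as a sketch (the buffer and heap names below are
 * illustrative only):
 *
 *	static uint8_t heap_mem[2048];
 *	static struct sys_heap my_heap;
 *
 *	sys_heap_init(&my_heap, heap_mem, sizeof(heap_mem));
 *	void *p = sys_heap_alloc(&my_heap, 100);
 *	...
 *	sys_heap_free(&my_heap, p);
 *
 * sys_heap_init() carves the backing buffer into the initial chunk layout
 * described below.
 */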
void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
{
	/* Must fit in a 31-bit count of CHUNK_UNIT */
	__ASSERT(bytes / CHUNK_UNIT <= 0x7fffffffU, "heap size is too big");

	/* Reserve the end marker chunk's header */
	__ASSERT(bytes > heap_footer_bytes(bytes), "heap size is too small");
	bytes -= heap_footer_bytes(bytes);

	/* Round the start up, the end down */
	uintptr_t addr = ROUND_UP(mem, CHUNK_UNIT);
	uintptr_t end = ROUND_DOWN((uint8_t *)mem + bytes, CHUNK_UNIT);
	chunksz_t heap_sz = (end - addr) / CHUNK_UNIT;

	CHECK(end > addr);
	__ASSERT(heap_sz > chunksz(sizeof(struct z_heap)), "heap size is too small");

	struct z_heap *h = (struct z_heap *)addr;
	heap->heap = h;
	h->end_chunk = heap_sz;
	h->avail_buckets = 0;

	int nb_buckets = bucket_idx(h, heap_sz) + 1;
	chunksz_t chunk0_size = chunksz(sizeof(struct z_heap) +
					nb_buckets * sizeof(struct z_heap_bucket));

	__ASSERT(chunk0_size + min_chunk_size(h) <= heap_sz, "heap size is too small");

	for (int i = 0; i < nb_buckets; i++) {
		h->buckets[i].next = 0;
	}

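	/*
	 * Resulting layout (a sketch; widths not to scale):
	 *
	 *	chunk 0             chunk0_size            heap_sz
	 *	| z_heap + buckets | free memory .........| end marker |
	 *	  (marked used)      (on the free list)     (size 0, used)
	 */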
	/* chunk containing our struct z_heap */
	set_chunk_size(h, 0, chunk0_size);
	set_left_chunk_size(h, 0, 0);
	set_chunk_used(h, 0, true);

	/* chunk containing the free heap */
	set_chunk_size(h, chunk0_size, heap_sz - chunk0_size);
	set_left_chunk_size(h, chunk0_size, chunk0_size);

	/* the end marker chunk */
	set_chunk_size(h, heap_sz, 0);
	set_left_chunk_size(h, heap_sz, heap_sz - chunk0_size);
	set_chunk_used(h, heap_sz, true);

	free_list_add(h, chunk0_size);
}