/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/sys/sys_heap.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/heap_listener.h>
#include <zephyr/kernel.h>
#include <string.h>
#include "heap.h"
#ifdef CONFIG_MSAN
#include <sanitizer/msan_interface.h>
#endif

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
static inline void increase_allocated_bytes(struct z_heap *h, size_t num_bytes)
{
	h->allocated_bytes += num_bytes;
	h->max_allocated_bytes = MAX(h->max_allocated_bytes, h->allocated_bytes);
}
#endif

static void *chunk_mem(struct z_heap *h, chunkid_t c)
{
	chunk_unit_t *buf = chunk_buf(h);
	uint8_t *ret = ((uint8_t *)&buf[c]) + chunk_header_bytes(h);

	CHECK(!(((uintptr_t)ret) & (big_heap(h) ? 7 : 3)));

	return ret;
}
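
/*
 * Illustration (assuming the usual CHUNK_UNIT of 8 bytes from heap.h):
 * on a "small" heap the chunk header takes 4 bytes, so the user memory
 * for chunk c starts at chunk_buf(h) + 8 * c + 4 and is 4-byte aligned;
 * on a "big" heap the header takes 8 bytes and the pointer is 8-byte
 * aligned, which is what the CHECK() above verifies.
 */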

static void free_list_remove_bidx(struct z_heap *h, chunkid_t c, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	CHECK(!chunk_used(h, c));
	CHECK(b->next != 0);
	CHECK(h->avail_buckets & BIT(bidx));

	if (next_free_chunk(h, c) == c) {
		/* this is the last chunk */
		h->avail_buckets &= ~BIT(bidx);
		b->next = 0;
	} else {
		chunkid_t first = prev_free_chunk(h, c),
			  second = next_free_chunk(h, c);

		b->next = second;
		set_next_free_chunk(h, first, second);
		set_prev_free_chunk(h, second, first);
	}

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	h->free_bytes -= chunksz_to_bytes(h, chunk_size(h, c));
#endif
}

static void free_list_remove(struct z_heap *h, chunkid_t c)
{
	if (!solo_free_header(h, c)) {
		int bidx = bucket_idx(h, chunk_size(h, c));

		free_list_remove_bidx(h, c, bidx);
	}
}

static void free_list_add_bidx(struct z_heap *h, chunkid_t c, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	if (b->next == 0U) {
		CHECK((h->avail_buckets & BIT(bidx)) == 0);

		/* Empty list, first item */
		h->avail_buckets |= BIT(bidx);
		b->next = c;
		set_prev_free_chunk(h, c, c);
		set_next_free_chunk(h, c, c);
	} else {
		CHECK(h->avail_buckets & BIT(bidx));

		/* Insert before (!) the "next" pointer */
		chunkid_t second = b->next;
		chunkid_t first = prev_free_chunk(h, second);

		set_prev_free_chunk(h, c, first);
		set_next_free_chunk(h, c, second);
		set_next_free_chunk(h, first, c);
		set_prev_free_chunk(h, second, c);
	}

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	h->free_bytes += chunksz_to_bytes(h, chunk_size(h, c));
#endif
}
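
/*
 * Illustration (made-up chunk IDs): with b->next = 5 and the circular
 * list 5 <-> 9, free_list_add_bidx(h, 7, bidx) links the new chunk in
 * just before b->next, giving 5 <-> 9 <-> 7 <-> 5 with b->next still
 * pointing at 5.
 */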

static void free_list_add(struct z_heap *h, chunkid_t c)
{
	if (!solo_free_header(h, c)) {
		int bidx = bucket_idx(h, chunk_size(h, c));

		free_list_add_bidx(h, c, bidx);
	}
}

/* Splits a chunk "lc" into a left chunk and a right chunk at "rc".
 * Leaves both chunks marked "free"
 */
static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
{
	CHECK(rc > lc);
	CHECK(rc - lc < chunk_size(h, lc));

	chunksz_t sz0 = chunk_size(h, lc);
	chunksz_t lsz = rc - lc;
	chunksz_t rsz = sz0 - lsz;

	set_chunk_size(h, lc, lsz);
	set_chunk_size(h, rc, rsz);
	set_left_chunk_size(h, rc, lsz);
	set_left_chunk_size(h, right_chunk(h, rc), rsz);
}
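
/*
 * Worked example (made-up chunk IDs): splitting a 10-unit chunk lc = 20
 * at rc = 23 leaves lc with 3 units and rc with 7 units; the
 * left_chunk_size fields of rc and of the chunk to rc's right are
 * updated so the physical chunk chain stays consistent.
 */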

/* Does not modify free list */
static void merge_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
{
	chunksz_t newsz = chunk_size(h, lc) + chunk_size(h, rc);

	set_chunk_size(h, lc, newsz);
	set_left_chunk_size(h, right_chunk(h, rc), newsz);
}

static void free_chunk(struct z_heap *h, chunkid_t c)
{
	/* Merge with free right chunk? */
	if (!chunk_used(h, right_chunk(h, c))) {
		free_list_remove(h, right_chunk(h, c));
		merge_chunks(h, c, right_chunk(h, c));
	}

	/* Merge with free left chunk? */
	if (!chunk_used(h, left_chunk(h, c))) {
		free_list_remove(h, left_chunk(h, c));
		merge_chunks(h, left_chunk(h, c), c);
		c = left_chunk(h, c);
	}

	free_list_add(h, c);
}

/*
 * Return the closest chunk ID corresponding to given memory pointer.
 * Here "closest" is only meaningful in the context of sys_heap_aligned_alloc()
 * where wanted alignment might not always correspond to a chunk header
 * boundary.
 */
static chunkid_t mem_to_chunkid(struct z_heap *h, void *p)
{
	uint8_t *mem = p, *base = (uint8_t *)chunk_buf(h);
	return (mem - chunk_header_bytes(h) - base) / CHUNK_UNIT;
}
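
/*
 * Worked example (illustrative numbers, assuming a 4-byte header and
 * CHUNK_UNIT = 8): chunk 3 starts 24 bytes into the chunk area, and a
 * pointer 6 bytes into it yields (24 + 6 - 4) / 8 = 3 after truncating
 * division, so an aligned pointer that does not sit right after a
 * header still maps back to the chunk that contains it.
 */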

void sys_heap_free(struct sys_heap *heap, void *mem)
{
	if (mem == NULL) {
		return; /* ISO C free() semantics */
	}
	struct z_heap *h = heap->heap;
	chunkid_t c = mem_to_chunkid(h, mem);

	/*
	 * This should catch many double-free cases.
	 * This is cheap enough so let's do it all the time.
	 */
	__ASSERT(chunk_used(h, c),
		 "unexpected heap state (double-free?) for memory at %p", mem);

	/*
	 * It is easy to catch many common memory overflow cases with
	 * a quick check on this chunk's and the next chunk's header
	 * fields, which sit immediately before and after the freed memory.
	 */
	__ASSERT(left_chunk(h, right_chunk(h, c)) == c,
		 "corrupted heap bounds (buffer overflow?) for memory at %p",
		 mem);

	set_chunk_used(h, c, false);
#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	h->allocated_bytes -= chunksz_to_bytes(h, chunk_size(h, c));
#endif

#ifdef CONFIG_SYS_HEAP_LISTENER
	heap_listener_notify_free(HEAP_ID_FROM_POINTER(heap), mem,
				  chunksz_to_bytes(h, chunk_size(h, c)));
#endif

	free_chunk(h, c);
}

size_t sys_heap_usable_size(struct sys_heap *heap, void *mem)
{
	struct z_heap *h = heap->heap;
	chunkid_t c = mem_to_chunkid(h, mem);
	size_t addr = (size_t)mem;
	size_t chunk_base = (size_t)&chunk_buf(h)[c];
	size_t chunk_sz = chunk_size(h, c) * CHUNK_UNIT;

	return chunk_sz - (addr - chunk_base);
}
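
/*
 * Worked example (illustrative numbers, assuming a 4-byte header and
 * CHUNK_UNIT = 8): a 16-unit chunk spans 128 bytes, and a pointer 4
 * bytes past the chunk base leaves 128 - 4 = 124 usable bytes, i.e.
 * everything from mem up to the end of the chunk.
 */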

static chunkid_t alloc_chunk(struct z_heap *h, chunksz_t sz)
{
	int bi = bucket_idx(h, sz);
	struct z_heap_bucket *b = &h->buckets[bi];

	CHECK(bi <= bucket_idx(h, h->end_chunk));

	/* First try a bounded count of items from the minimal bucket
	 * size.  These may not fit, but trying (e.g.) three means that
	 * (assuming that chunk sizes are evenly distributed[1]) we
	 * have a 7/8 chance of finding a match, thus keeping the
	 * number of such blocks consumed by allocation higher than
	 * the number of smaller blocks created by fragmenting larger
	 * ones.
	 *
	 * [1] In practice, they are never evenly distributed, of
	 * course.  But even in pathological situations we still
	 * maintain our constant time performance and at worst see
	 * fragmentation waste on the order of the allocated block
	 * only.
	 */
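	/*
	 * For example, assuming the idealized 1/2 fit probability per
	 * candidate and CONFIG_SYS_HEAP_ALLOC_LOOPS = 3: the chance
	 * that none of the three chunks fits is (1/2)^3 = 1/8, giving
	 * the 7/8 figure quoted above.
	 */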
	if (b->next) {
		chunkid_t first = b->next;
		int i = CONFIG_SYS_HEAP_ALLOC_LOOPS;
		do {
			chunkid_t c = b->next;
			if (chunk_size(h, c) >= sz) {
				free_list_remove_bidx(h, c, bi);
				return c;
			}
			b->next = next_free_chunk(h, c);
			CHECK(b->next != 0);
		} while (--i && b->next != first);
	}

	/* Otherwise pick the smallest non-empty bucket guaranteed to
	 * fit and use that unconditionally.
	 */
	uint32_t bmask = h->avail_buckets & ~BIT_MASK(bi + 1);

	if (bmask != 0U) {
		int minbucket = __builtin_ctz(bmask);
		chunkid_t c = h->buckets[minbucket].next;

		free_list_remove_bidx(h, c, minbucket);
		CHECK(chunk_size(h, c) >= sz);
		return c;
	}

	return 0;
}

void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
{
	struct z_heap *h = heap->heap;
	void *mem;

	if (bytes == 0U || size_too_big(h, bytes)) {
		return NULL;
	}

	chunksz_t chunk_sz = bytes_to_chunksz(h, bytes);
	chunkid_t c = alloc_chunk(h, chunk_sz);
	if (c == 0U) {
		return NULL;
	}

	/* Split off remainder if any */
	if (chunk_size(h, c) > chunk_sz) {
		split_chunks(h, c, c + chunk_sz);
		free_list_add(h, c + chunk_sz);
	}

	set_chunk_used(h, c, true);

	mem = chunk_mem(h, c);

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	increase_allocated_bytes(h, chunksz_to_bytes(h, chunk_size(h, c)));
#endif

#ifdef CONFIG_SYS_HEAP_LISTENER
	heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(heap), mem,
				   chunksz_to_bytes(h, chunk_size(h, c)));
#endif

	IF_ENABLED(CONFIG_MSAN, (__msan_allocated_memory(mem, bytes)));
	return mem;
}
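
/*
 * A minimal usage sketch (buffer name and sizes are illustrative):
 *
 *	static uint8_t heap_mem[2048];
 *	static struct sys_heap my_heap;
 *
 *	sys_heap_init(&my_heap, heap_mem, sizeof(heap_mem));
 *	void *p = sys_heap_alloc(&my_heap, 100);
 *	if (p != NULL) {
 *		// ... use up to sys_heap_usable_size(&my_heap, p) bytes
 *		sys_heap_free(&my_heap, p);
 *	}
 */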

void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
{
	struct z_heap *h = heap->heap;
	size_t gap, rew;

	/*
	 * Split align and rewind values (if any).
	 * We allow for one bit of rewind in addition to the alignment
	 * value to efficiently accommodate z_heap_aligned_alloc().
	 * So if e.g. align = 0x28 (32 | 8) this means we align to a 32-byte
	 * boundary and then rewind 8 bytes.
	 */
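	/*
	 * Working through the example above: rew = 0x28 & -0x28 = 0x08
	 * isolates the lowest set bit, align becomes 0x28 - 0x08 = 0x20
	 * (a 32-byte boundary), and gap = MIN(0x08, chunk_header_bytes(h)).
	 */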
	rew = align & -align;
	if (align != rew) {
		align -= rew;
		gap = MIN(rew, chunk_header_bytes(h));
	} else {
		if (align <= chunk_header_bytes(h)) {
			return sys_heap_alloc(heap, bytes);
		}
		rew = 0;
		gap = chunk_header_bytes(h);
	}
	__ASSERT((align & (align - 1)) == 0, "align must be a power of 2");

	if (bytes == 0 || size_too_big(h, bytes)) {
		return NULL;
	}

	/*
	 * Find a free block that is guaranteed to fit.
	 * We over-allocate to account for alignment and then free
	 * the unused prefix and suffix afterwards.
	 */
	chunksz_t padded_sz = bytes_to_chunksz(h, bytes + align - gap);
	chunkid_t c0 = alloc_chunk(h, padded_sz);

	if (c0 == 0) {
		return NULL;
	}
	uint8_t *mem = chunk_mem(h, c0);

	/* Align allocated memory */
	mem = (uint8_t *) ROUND_UP(mem + rew, align) - rew;
	chunk_unit_t *end = (chunk_unit_t *) ROUND_UP(mem + bytes, CHUNK_UNIT);

	/* Get corresponding chunks */
	chunkid_t c = mem_to_chunkid(h, mem);
	chunkid_t c_end = end - chunk_buf(h);
	CHECK(c >= c0 && c < c_end && c_end <= c0 + padded_sz);

	/* Split and free unused prefix */
	if (c > c0) {
		split_chunks(h, c0, c);
		free_list_add(h, c0);
	}

	/* Split and free unused suffix */
	if (right_chunk(h, c) > c_end) {
		split_chunks(h, c, c_end);
		free_list_add(h, c_end);
	}

	set_chunk_used(h, c, true);

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	increase_allocated_bytes(h, chunksz_to_bytes(h, chunk_size(h, c)));
#endif

#ifdef CONFIG_SYS_HEAP_LISTENER
	heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(heap), mem,
				   chunksz_to_bytes(h, chunk_size(h, c)));
#endif

	IF_ENABLED(CONFIG_MSAN, (__msan_allocated_memory(mem, bytes)));
	return mem;
}
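
/*
 * A minimal usage sketch (alignment and size are illustrative): the
 * call below over-allocates by up to align - gap bytes, aligns the
 * returned pointer, then gives the trimmed prefix and suffix chunks
 * back to the free lists:
 *
 *	void *p = sys_heap_aligned_alloc(&my_heap, 64, 100);
 *
 * On success ((uintptr_t)p & 63) == 0 holds.
 */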

void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
			       size_t align, size_t bytes)
{
	struct z_heap *h = heap->heap;

	/* special realloc semantics */
	if (ptr == NULL) {
		return sys_heap_aligned_alloc(heap, align, bytes);
	}
	if (bytes == 0) {
		sys_heap_free(heap, ptr);
		return NULL;
	}

	__ASSERT((align & (align - 1)) == 0, "align must be a power of 2");

	if (size_too_big(h, bytes)) {
		return NULL;
	}

	chunkid_t c = mem_to_chunkid(h, ptr);
	chunkid_t rc = right_chunk(h, c);
	size_t align_gap = (uint8_t *)ptr - (uint8_t *)chunk_mem(h, c);
	chunksz_t chunks_need = bytes_to_chunksz(h, bytes + align_gap);

	if (align && ((uintptr_t)ptr & (align - 1))) {
		/* ptr is not sufficiently aligned; fall through to
		 * the allocate-and-copy path below
		 */
	} else if (chunk_size(h, c) == chunks_need) {
		/* We're good already */
		return ptr;
	} else if (chunk_size(h, c) > chunks_need) {
		/* Shrink in place, split off and free unused suffix */
#ifdef CONFIG_SYS_HEAP_LISTENER
		size_t bytes_freed = chunksz_to_bytes(h, chunk_size(h, c));
#endif

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
		h->allocated_bytes -=
			(chunk_size(h, c) - chunks_need) * CHUNK_UNIT;
#endif

		split_chunks(h, c, c + chunks_need);
		set_chunk_used(h, c, true);
		free_chunk(h, c + chunks_need);

#ifdef CONFIG_SYS_HEAP_LISTENER
		heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(heap), ptr,
					   chunksz_to_bytes(h, chunk_size(h, c)));
		heap_listener_notify_free(HEAP_ID_FROM_POINTER(heap), ptr,
					  bytes_freed);
#endif

		return ptr;
	} else if (!chunk_used(h, rc) &&
		   (chunk_size(h, c) + chunk_size(h, rc) >= chunks_need)) {
		/* Expand: split the right chunk and append */
		chunksz_t split_size = chunks_need - chunk_size(h, c);

#ifdef CONFIG_SYS_HEAP_LISTENER
		size_t bytes_freed = chunksz_to_bytes(h, chunk_size(h, c));
#endif

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
		increase_allocated_bytes(h, split_size * CHUNK_UNIT);
#endif

		free_list_remove(h, rc);

		if (split_size < chunk_size(h, rc)) {
			split_chunks(h, rc, rc + split_size);
			free_list_add(h, rc + split_size);
		}

		merge_chunks(h, c, rc);
		set_chunk_used(h, c, true);

#ifdef CONFIG_SYS_HEAP_LISTENER
		heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(heap), ptr,
					   chunksz_to_bytes(h, chunk_size(h, c)));
		heap_listener_notify_free(HEAP_ID_FROM_POINTER(heap), ptr,
					  bytes_freed);
#endif

		return ptr;
	} else {
		;
	}

	/*
	 * Fallback: allocate and copy
	 *
	 * Note for heap listener notification:
	 * The calls to the allocation and free functions below already
	 * generate notifications, so there is no need to do so here.
	 */
	void *ptr2 = sys_heap_aligned_alloc(heap, align, bytes);

	if (ptr2 != NULL) {
		size_t prev_size = chunksz_to_bytes(h, chunk_size(h, c)) - align_gap;

		memcpy(ptr2, ptr, MIN(prev_size, bytes));
		sys_heap_free(heap, ptr);
	}
	return ptr2;
}
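
/*
 * A minimal usage sketch (sizes are illustrative; align = 0 requests
 * no particular alignment): growth first tries the in-place paths
 * above and only falls back to allocate-and-copy when the right
 * neighbor cannot absorb it:
 *
 *	void *q = sys_heap_aligned_realloc(&my_heap, p, 0, 200);
 *
 * On failure q is NULL and the original allocation p stays valid.
 */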

void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
{
	IF_ENABLED(CONFIG_MSAN, (__sanitizer_dtor_callback(mem, bytes)));

	if (IS_ENABLED(CONFIG_SYS_HEAP_SMALL_ONLY)) {
		/* Must fit in a 15 bit count of CHUNK_UNIT */
		__ASSERT(bytes / CHUNK_UNIT <= 0x7fffU, "heap size is too big");
	} else {
		/* Must fit in a 31 bit count of CHUNK_UNIT */
		__ASSERT(bytes / CHUNK_UNIT <= 0x7fffffffU, "heap size is too big");
	}

	/* Reserve the end marker chunk's header */
	__ASSERT(bytes > heap_footer_bytes(bytes), "heap size is too small");
	bytes -= heap_footer_bytes(bytes);

	/* Round the start up, the end down */
	uintptr_t addr = ROUND_UP(mem, CHUNK_UNIT);
	uintptr_t end = ROUND_DOWN((uint8_t *)mem + bytes, CHUNK_UNIT);
	chunksz_t heap_sz = (end - addr) / CHUNK_UNIT;

	CHECK(end > addr);
	__ASSERT(heap_sz > chunksz(sizeof(struct z_heap)), "heap size is too small");

	struct z_heap *h = (struct z_heap *)addr;
	heap->heap = h;
	h->end_chunk = heap_sz;
	h->avail_buckets = 0;

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	h->free_bytes = 0;
	h->allocated_bytes = 0;
	h->max_allocated_bytes = 0;
#endif

	int nb_buckets = bucket_idx(h, heap_sz) + 1;
	chunksz_t chunk0_size = chunksz(sizeof(struct z_heap) +
				     nb_buckets * sizeof(struct z_heap_bucket));

	__ASSERT(chunk0_size + min_chunk_size(h) <= heap_sz, "heap size is too small");

	for (int i = 0; i < nb_buckets; i++) {
		h->buckets[i].next = 0;
	}

	/* chunk containing our struct z_heap */
	set_chunk_size(h, 0, chunk0_size);
	set_left_chunk_size(h, 0, 0);
	set_chunk_used(h, 0, true);

	/* chunk containing the free heap */
	set_chunk_size(h, chunk0_size, heap_sz - chunk0_size);
	set_left_chunk_size(h, chunk0_size, chunk0_size);

	/* the end marker chunk */
	set_chunk_size(h, heap_sz, 0);
	set_left_chunk_size(h, heap_sz, heap_sz - chunk0_size);
	set_chunk_used(h, heap_sz, true);

	free_list_add(h, chunk0_size);
}
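
/*
 * For reference, the layout produced above (schematic, not to scale):
 *
 *	chunk 0             chunk0_size         heap_sz (end marker)
 *	+------------------+-------------------+--------+
 *	| z_heap + buckets | free memory       | header |
 *	| (used)           | (on a free list)  | (used) |
 *	+------------------+-------------------+--------+
 *
 * The used, zero-sized end marker keeps free_chunk() from ever trying
 * to merge past the end of the heap.
 */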