/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/sys/sys_heap.h>
#include <zephyr/sys/heap_listener.h>
#include <inttypes.h>

/* Guess at a value for heap size based on available memory on the
 * platform, with workarounds.
 */

#if defined(CONFIG_SOC_MPS2_AN521) && defined(CONFIG_QEMU_TARGET)
/* mps2/an521 blows up if allowed to link into large area, even though
 * the link is successful and it claims the memory is there.  We get
 * hard faults on boot in qemu before entry to cstart() once MEMSZ is
 * allowed to get near 256kb.
 */
# define MEMSZ (192 * 1024)
#elif defined(CONFIG_ARCH_POSIX)
/* POSIX arch based targets don't support CONFIG_SRAM_SIZE at all (because
 * they can link anything big enough to fit on the host), so just use a
 * reasonable value.
 */
# define MEMSZ (2 * 1024 * 1024)
#elif defined(CONFIG_SOC_ARC_EMSDP) || defined(CONFIG_SOC_EMSK)
/* Various ARC platforms set CONFIG_SRAM_SIZE to 16-128M, but have a
 * much lower limit of (32-64k) in their linker scripts.  Pick a
 * conservative fallback.
 */
# define MEMSZ (16 * 1024)
#else
/* Otherwise just trust CONFIG_SRAM_SIZE
 */
# define MEMSZ (1024 * (size_t) CONFIG_SRAM_SIZE)
#endif

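/* Heap sizes under test: BIG_HEAP_SZ is capped by the available RAM
 * guessed above, while SMALL_HEAP_SZ is kept tiny so the stress loop
 * below can exercise it exhaustively.
 */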
#define BIG_HEAP_SZ MIN(256 * 1024, MEMSZ / 3)
#define SMALL_HEAP_SZ MIN(BIG_HEAP_SZ, 2048)

/* Enabling SYS_HEAP_RUNTIME_STATS grows struct z_heap by 16 bytes on
 * a 64-bit CPU, so the solo-free-header heap below must be
 * correspondingly larger.
 */
#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
#define SOLO_FREE_HEADER_HEAP_SZ (80)
#else
#define SOLO_FREE_HEADER_HEAP_SZ (64)
#endif

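/* Scratch buffer handed to sys_heap_stress() below; the stress helper
 * uses it for its own bookkeeping of live allocations, so size it
 * generously at half the heap backing store.
 */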
#define SCRATCH_SZ (sizeof(heapmem) / 2)

/* The test memory.  Make them pointer arrays for robust alignment
 * behavior
 */
void *heapmem[BIG_HEAP_SZ / sizeof(void *)];
void *scratchmem[SCRATCH_SZ / sizeof(void *)];

/* How many alloc/free operations are tested on each heap.  Two per
 * byte of heap sounds about right to get exhaustive coverage without
 * blowing too many cycles
 */
#define ITERATION_COUNT (2 * SMALL_HEAP_SZ)

/* Simple dumb hash function of the size and address */
static size_t fill_token(void *p, size_t sz)
{
	size_t pi = (size_t) p;

	return (pi * sz) ^ ((sz ^ 0xea6d) * ((pi << 11) | (pi >> 21)));
}

/* Puts markers at the start and end of a block so we can detect
 * anything scribbling on it while it was allocated.  The first word
 * is the block size.  The second and last words (if they fit) are a
 * hashed "fill token".
 */
static void fill_block(void *p, size_t sz)
{
	if (p == NULL) {
		return;
	}

	size_t tok = fill_token(p, sz);

	((size_t *)p)[0] = sz;

	if (sz >= 2 * sizeof(size_t)) {
		((size_t *)p)[1] = tok;
	}

	if (sz > 3 * sizeof(size_t)) {
		((size_t *)p)[sz / sizeof(size_t) - 1] = tok;
	}
}

/* Checks markers just before freeing a block */
static void check_fill(void *p)
{
	size_t sz = ((size_t *)p)[0];
	size_t tok = fill_token(p, sz);

	zassert_true(sz > 0, "");

	if (sz >= 2 * sizeof(size_t)) {
		zassert_true(((size_t *)p)[1] == tok, "");
	}

	if (sz > 3 * sizeof(size_t)) {
		zassert_true(((size_t *)p)[sz / sizeof(size_t) - 1] == tok, "");
	}
}

void *testalloc(void *arg, size_t bytes)
{
	void *ret = sys_heap_alloc(arg, bytes);

	if (ret != NULL) {
		/* White box: the heap internals allocate memory in
		 * 8-byte chunk units, no more than needed, but with a
		 * 4- or 8-byte header prepended.  Use this to
		 * validate the block size predicate.
		 */
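		/* Worked example of the arithmetic below (assuming a
		 * 4-byte header): for a 10-byte request returned at an
		 * address ending in ...4, hdr = 4 and
		 * expect = ROUND_UP(10 + 4, 8) - 4 = 12.
		 */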
		size_t blksz = sys_heap_usable_size(arg, ret);
		size_t addr = (size_t) ret;
		size_t chunk = ROUND_DOWN(addr - 1, 8);
		size_t hdr = addr - chunk;
		size_t expect = ROUND_UP(bytes + hdr, 8) - hdr;

		zassert_equal(blksz, expect,
			      "wrong size block returned bytes = %ld ret = %ld",
			      bytes, blksz);
	}

	fill_block(ret, bytes);
	sys_heap_validate(arg);
	return ret;
}

void testfree(void *arg, void *p)
{
	check_fill(p);
	sys_heap_free(arg, p);
	sys_heap_validate(arg);
}

static void log_result(size_t sz, struct z_heap_stress_result *r)
{
	uint32_t tot = r->total_allocs + r->total_frees;
	uint32_t avg = (uint32_t)((r->accumulated_in_use_bytes + tot/2) / tot);
	uint32_t avg_pct = (uint32_t)((100ULL * avg + sz / 2) / sz);
	uint32_t succ_pct = ((100ULL * r->successful_allocs + r->total_allocs / 2)
			  / r->total_allocs);

	TC_PRINT("successful allocs: %d/%d (%d%%), frees: %d,"
		 "  avg usage: %d/%d (%d%%)\n",
		 r->successful_allocs, r->total_allocs, succ_pct,
		 r->total_frees, avg, (int) sz, avg_pct);
}

/* Do a heavy test over a small heap, with many iterations that need
 * to reuse memory repeatedly.  Target 50% fill, as that setting tends
 * to prevent runaway fragmentation and most allocations continue to
 * succeed in steady state.
 */
ZTEST(lib_heap, test_small_heap)
{
	struct sys_heap heap;
	struct z_heap_stress_result result;

	TC_PRINT("Testing small (%d byte) heap\n", (int) SMALL_HEAP_SZ);

	sys_heap_init(&heap, heapmem, SMALL_HEAP_SZ);
	zassert_true(sys_heap_validate(&heap), "");
	sys_heap_stress(testalloc, testfree, &heap,
			SMALL_HEAP_SZ, ITERATION_COUNT,
			scratchmem, sizeof(scratchmem),
			50, &result);

	log_result(SMALL_HEAP_SZ, &result);
}

/* Very similar, but tests a fragmentation runaway scenario where we
 * target 100% fill and end up breaking memory up into maximally
 * fragmented blocks (i.e. small allocations always grab and split the
 * bigger chunks).  Obviously alloc success rates will be very low,
 * but consistency should still be maintained.  Paradoxically, the
 * fill level is not much better than the 50% target due to all the
 * fragmentation overhead (also the way we do accounting: we count
 * bytes requested, so if you ask for a 3-byte block and receive an
 * 8-byte minimal chunk, we still count that as 5 bytes of waste).
 */
ZTEST(lib_heap, test_fragmentation)
{
	struct sys_heap heap;
	struct z_heap_stress_result result;

	TC_PRINT("Testing maximally fragmented (%d byte) heap\n",
		 (int) SMALL_HEAP_SZ);

	sys_heap_init(&heap, heapmem, SMALL_HEAP_SZ);
	zassert_true(sys_heap_validate(&heap), "");
	sys_heap_stress(testalloc, testfree, &heap,
			SMALL_HEAP_SZ, ITERATION_COUNT,
			scratchmem, sizeof(scratchmem),
			100, &result);

	log_result(SMALL_HEAP_SZ, &result);
}

/* The heap block format changes for heaps with more than 2^15 chunks,
 * so test that case too.  This can be too large to iterate over
 * exhaustively with good performance, so the relative operation count
 * and fragmentation are going to be lower.
 */
ZTEST(lib_heap, test_big_heap)
{
	struct sys_heap heap;
	struct z_heap_stress_result result;

	if (IS_ENABLED(CONFIG_SYS_HEAP_SMALL_ONLY)) {
		TC_PRINT("big heap support is disabled\n");
		ztest_test_skip();
	}

	TC_PRINT("Testing big (%d byte) heap\n", (int) BIG_HEAP_SZ);

	sys_heap_init(&heap, heapmem, BIG_HEAP_SZ);
	zassert_true(sys_heap_validate(&heap), "");
	sys_heap_stress(testalloc, testfree, &heap,
			BIG_HEAP_SZ, ITERATION_COUNT,
			scratchmem, sizeof(scratchmem),
			100, &result);

	log_result(BIG_HEAP_SZ, &result);
}

/* Test a heap with a solo free header.  A solo free header can exist
 * only on a 64-bit CPU (i.e. when chunk_header_bytes() == 8).
 * With a 64-byte heap and a 1-byte allocation on a big heap, we get:
 *
 *   0   1   2   3   4   5   6   7
 * | h | h | b | b | c | 1 | s | f |
 *
 * where
 * - h: chunk0 header
 * - b: buckets in chunk0
 * - c: chunk header for the first allocation
 * - 1: chunk mem
 * - s: solo free header
 * - f: end marker / footer
 */
ZTEST(lib_heap, test_solo_free_header)
{
	struct sys_heap heap;

	TC_PRINT("Testing solo free header in a heap\n");

	sys_heap_init(&heap, heapmem, SOLO_FREE_HEADER_HEAP_SZ);
	if (sizeof(void *) > 4U) {
		sys_heap_alloc(&heap, 1);
		zassert_true(sys_heap_validate(&heap), "");
	} else {
		ztest_test_skip();
	}
}

/* Simple clobber detection */
void realloc_fill_block(uint8_t *p, size_t sz)
{
	uint8_t val = (uint8_t)((uintptr_t)p >> 3);

	for (int i = 0; i < sz; i++) {
		p[i] = (uint8_t)(val + i);
	}
}

bool realloc_check_block(uint8_t *data, uint8_t *orig, size_t sz)
{
	uint8_t val = (uint8_t)((uintptr_t)orig >> 3);

	for (int i = 0; i < sz; i++) {
		if (data[i] != (uint8_t)(val + i)) {
			return false;
		}
	}
	return true;
}

ZTEST(lib_heap, test_realloc)
{
	struct sys_heap heap;
	void *p1, *p2, *p3;

	/* Note whitebox assumption: allocation goes from low address
	 * to high in an empty heap.
	 */

	sys_heap_init(&heap, heapmem, SMALL_HEAP_SZ);

	/* Allocate from an empty heap, then expand, validate that it
	 * happens in place.
	 */
	p1 = sys_heap_alloc(&heap, 64);
	realloc_fill_block(p1, 64);
	p2 = sys_heap_realloc(&heap, p1, 128);

	zassert_true(sys_heap_validate(&heap), "invalid heap");
	zassert_true(p1 == p2,
		     "Realloc should have expanded in place %p -> %p",
		     p1, p2);
	zassert_true(realloc_check_block(p2, p1, 64), "data changed");

	/* Allocate two blocks, then expand the first, validate that
	 * it moves.
	 */
	p1 = sys_heap_alloc(&heap, 64);
	realloc_fill_block(p1, 64);
	p2 = sys_heap_alloc(&heap, 64);
	realloc_fill_block(p2, 64);
	p3 = sys_heap_realloc(&heap, p1, 128);

	zassert_true(sys_heap_validate(&heap), "invalid heap");
	zassert_true(p1 != p3,
		     "Realloc should have moved %p", p1);
	zassert_true(realloc_check_block(p2, p2, 64), "data changed");
	zassert_true(realloc_check_block(p3, p1, 64), "data changed");

	/* Allocate, then shrink.  Validate that it does not move. */
	p1 = sys_heap_alloc(&heap, 128);
	realloc_fill_block(p1, 128);
	p2 = sys_heap_realloc(&heap, p1, 64);

	zassert_true(sys_heap_validate(&heap), "invalid heap");
	zassert_true(p1 == p2,
		     "Realloc should have shrunk in place %p -> %p",
		     p1, p2);
	zassert_true(realloc_check_block(p2, p1, 64), "data changed");

	/* Allocate two blocks, then expand the first within a chunk.
	 * Validate that it doesn't move.  We assume CHUNK_UNIT == 8,
	 * so a 61-byte and a 64-byte request occupy the same number of
	 * chunks and the realloc can grow in place.
	 */
	p1 = sys_heap_alloc(&heap, 61);
	realloc_fill_block(p1, 61);
	p2 = sys_heap_alloc(&heap, 80);
	realloc_fill_block(p2, 80);
	p3 = sys_heap_realloc(&heap, p1, 64);

	zassert_true(sys_heap_validate(&heap), "invalid heap");
	zassert_true(p1 == p3,
		     "Realloc should have expanded in place %p -> %p",
		     p1, p3);
	zassert_true(realloc_check_block(p3, p1, 61), "data changed");

	/* Corner case with sys_heap_aligned_realloc() on 32-bit targets
	 * where the actual memory doesn't match the given pointer
	 * (align_gap != 0).
	 */
	p1 = sys_heap_aligned_alloc(&heap, 8, 32);
	realloc_fill_block(p1, 32);
	p2 = sys_heap_alloc(&heap, 32);
	realloc_fill_block(p2, 32);
	p3 = sys_heap_aligned_realloc(&heap, p1, 8, 36);

	zassert_true(sys_heap_validate(&heap), "invalid heap");
	zassert_true(realloc_check_block(p3, p1, 32), "data changed");
	zassert_true(realloc_check_block(p2, p2, 32), "data changed");
	realloc_fill_block(p3, 36);
	zassert_true(sys_heap_validate(&heap), "invalid heap");
	zassert_true(p1 != p3,
		     "Realloc should have moved %p", p1);

	/* Test realloc with increasing alignment */
	p1 = sys_heap_aligned_alloc(&heap, 32, 32);
	p2 = sys_heap_aligned_alloc(&heap, 8, 32);
	p3 = sys_heap_aligned_realloc(&heap, p2, 8, 16);
	zassert_true(sys_heap_validate(&heap), "invalid heap");
	zassert_true(p2 == p3,
		     "Realloc should have expanded in place %p -> %p",
		     p2, p3);
	p3 = sys_heap_aligned_alloc(&heap, 32, 8);
	zassert_true(sys_heap_validate(&heap), "invalid heap");
	zassert_true(p2 != p3,
		     "Realloc should have moved %p", p2);
}

#ifdef CONFIG_SYS_HEAP_LISTENER
static struct sys_heap listener_heap;
static uintptr_t listener_heap_id;
static void *listener_mem;

static void heap_alloc_cb(uintptr_t heap_id, void *mem, size_t bytes)
{
	listener_heap_id = heap_id;
	listener_mem = mem;

	TC_PRINT("Heap 0x%" PRIxPTR ", alloc %p, size %u\n",
		 heap_id, mem, (uint32_t)bytes);
}

static void heap_free_cb(uintptr_t heap_id, void *mem, size_t bytes)
{
	listener_heap_id = heap_id;
	listener_mem = mem;

	TC_PRINT("Heap 0x%" PRIxPTR ", free %p, size %u\n",
		 heap_id, mem, (uint32_t)bytes);
}
#endif /* CONFIG_SYS_HEAP_LISTENER */

ZTEST(lib_heap, test_heap_listeners)
{
#ifdef CONFIG_SYS_HEAP_LISTENER
	void *mem;

	HEAP_LISTENER_ALLOC_DEFINE(heap_event_alloc,
				   HEAP_ID_FROM_POINTER(&listener_heap),
				   heap_alloc_cb);

	HEAP_LISTENER_FREE_DEFINE(heap_event_free,
				  HEAP_ID_FROM_POINTER(&listener_heap),
				  heap_free_cb);

	sys_heap_init(&listener_heap, heapmem, SMALL_HEAP_SZ);

	/* Register listeners */
	heap_listener_register(&heap_event_alloc);
	heap_listener_register(&heap_event_free);

	/*
	 * Note that sys_heap may allocate a bigger block than
	 * requested due to how sys_heap works, so checking whether the
	 * allocated size equals the requested size does not work.
	 */

	/* Alloc/free operations without explicit alignment */
	mem = sys_heap_alloc(&listener_heap, 32U);

	zassert_equal(listener_heap_id,
		      HEAP_ID_FROM_POINTER(&listener_heap),
		      "Heap ID mismatched: 0x%lx != %p", listener_heap_id,
		      &listener_heap);
	zassert_equal(listener_mem, mem,
		      "Heap allocated pointer mismatched: %p != %p",
		      listener_mem, mem);

	sys_heap_free(&listener_heap, mem);
	zassert_equal(listener_heap_id,
		      HEAP_ID_FROM_POINTER(&listener_heap),
		      "Heap ID mismatched: 0x%lx != %p", listener_heap_id,
		      &listener_heap);
	zassert_equal(listener_mem, mem,
		      "Heap allocated pointer mismatched: %p != %p",
		      listener_mem, mem);

	/* Alloc/free operations with explicit alignment */
	mem = sys_heap_aligned_alloc(&listener_heap, 128U, 32U);

	zassert_equal(listener_heap_id,
		      HEAP_ID_FROM_POINTER(&listener_heap),
		      "Heap ID mismatched: 0x%lx != %p", listener_heap_id,
		      &listener_heap);
	zassert_equal(listener_mem, mem,
		      "Heap allocated pointer mismatched: %p != %p",
		      listener_mem, mem);

	sys_heap_free(&listener_heap, mem);
	zassert_equal(listener_heap_id,
		      HEAP_ID_FROM_POINTER(&listener_heap),
		      "Heap ID mismatched: 0x%lx != %p", listener_heap_id,
		      &listener_heap);
	zassert_equal(listener_mem, mem,
		      "Heap allocated pointer mismatched: %p != %p",
		      listener_mem, mem);

	/* Clean up */
	heap_listener_unregister(&heap_event_alloc);
	heap_listener_unregister(&heap_event_free);

#else /* CONFIG_SYS_HEAP_LISTENER */
	ztest_test_skip();
#endif /* CONFIG_SYS_HEAP_LISTENER */
}

ZTEST_SUITE(lib_heap, NULL, NULL, NULL, NULL, NULL);