1 /*
2  * Copyright (c) 2016 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/ztest.h>
8 #include <kernel_internal.h>
9 #include <zephyr/irq_offload.h>
10 #include <zephyr/sys/multi_heap.h>
11 #include "test_mheap.h"
12 
/* Worker-thread stack sizes. INCREMENTAL_FILL needs extra room for the
 * block-pointer bookkeeping arrays kept on the handler's stack.
 */
#define MALLOC_IN_THREAD_STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define INCREMENTAL_FILL_STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE + \
				     (BLK_NUM_MAX * sizeof(void *) * 2))
/* Allocation size guaranteed to exceed any heap (also trips overflow checks) */
#define OVERFLOW_SIZE    SIZE_MAX

/* k_calloc() test geometry: NMEMB elements of SIZE bytes, BOUNDS total */
#define NMEMB   8
#define SIZE    (K_HEAP_MEM_POOL_SIZE / NMEMB / 2)
#define BOUNDS  (NMEMB * SIZE)

/* Multi-heap test: N_MULTI_HEAPS independent backing heaps of MHEAP_BYTES */
#define N_MULTI_HEAPS 4
#define MHEAP_BYTES 128

static struct sys_multi_heap multi_heap;
static char heap_mem[N_MULTI_HEAPS][MHEAP_BYTES];
static struct sys_heap mheaps[N_MULTI_HEAPS];

/* Synchronization and thread objects for the worker-thread test cases */
K_SEM_DEFINE(malloc_in_thread_sem, 0, 1);
K_THREAD_STACK_DEFINE(malloc_in_thread_tstack, MALLOC_IN_THREAD_STACK_SIZE);
struct k_thread malloc_in_thread_tdata;

K_THREAD_STACK_DEFINE(malloc_free_tstack, INCREMENTAL_FILL_STACK_SIZE);
struct k_thread malloc_free_tdata;

K_THREAD_STACK_DEFINE(realloc_tstack, INCREMENTAL_FILL_STACK_SIZE);
struct k_thread realloc_tdata;
38 
malloc_free_handler(void * p1,void * p2,void * p3)39 static void malloc_free_handler(void *p1, void *p2, void *p3)
40 {
41 	void *block[2 * BLK_NUM_MAX], *block_fail;
42 	int nb;
43 
44 	for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
45 		/**
46 		 * TESTPOINT: This routine provides traditional malloc()
47 		 * semantics. Memory is allocated from the heap memory pool.
48 		 */
49 		block[nb] = k_malloc(BLK_SIZE_MIN);
50 		if (block[nb] == NULL) {
51 			break;
52 		}
53 	}
54 
55 	block_fail = k_malloc(BLK_SIZE_MIN);
56 	/** TESTPOINT: Return NULL if fail.*/
57 	zassert_is_null(block_fail, NULL);
58 
59 	for (int i = 0; i < nb; i++) {
60 		/**
61 		 * TESTPOINT: This routine provides traditional free()
62 		 * semantics. The memory being returned must have been allocated
63 		 * from the heap memory pool.
64 		 */
65 		k_free(block[i]);
66 	}
67 	/** TESTPOINT: If ptr is NULL, no operation is performed.*/
68 	k_free(NULL);
69 
70 	/** TESTPOINT: Return NULL if fail.*/
71 	block_fail = k_malloc(OVERFLOW_SIZE);
72 	zassert_is_null(block_fail, NULL);
73 }
74 
tIsr_malloc_and_free(void * data)75 static void tIsr_malloc_and_free(void *data)
76 {
77 	ARG_UNUSED(data);
78 	void *ptr;
79 
80 	ptr = (char *)z_thread_malloc(BLK_SIZE_MIN);
81 	zassert_not_null(ptr, "bytes allocation failed from system pool");
82 	k_free(ptr);
83 }
84 
malloc_in_thread_handler(void * p1,void * p2,void * p3)85 static void malloc_in_thread_handler(void *p1, void *p2, void *p3)
86 {
87 	void *ptr;
88 
89 	k_current_get()->resource_pool = NULL;
90 
91 	ptr = (char *)z_thread_malloc(BLK_SIZE_MIN);
92 	zassert_is_null(ptr, "bytes allocation failed from system pool");
93 
94 	k_sem_give(&malloc_in_thread_sem);
95 }
96 
/* Exercise k_realloc() semantics end to end: malloc-equivalent on NULL,
 * overflow rejection, growth until the heap is depleted, shrink to make
 * room, free-equivalent on size 0, and full heap recovery afterwards.
 * Runs in its own thread so the stack fits the bookkeeping arrays.
 * NOTE: the statement order is load-bearing — each step depends on the
 * exact heap state left by the previous one.
 */
static void realloc_handler(void *p1, void *p2, void *p3)
{
	void *block1, *block2;
	size_t nb;

	/* Realloc NULL pointer is equivalent to malloc() */
	block1 = k_realloc(NULL, BLK_SIZE_MIN);
	zassert_not_null(block1);

	/* Allocate something larger than the heap */
	block2 = k_realloc(NULL, OVERFLOW_SIZE);
	/* Should fail and return NULL */
	zassert_is_null(block2);

	/* Keep making the allocated buffer bigger until the heap is depleted */
	for (nb = 2; nb < (2 * BLK_NUM_MAX); nb++) {
		void *last_block1 = block1;

		block1 = k_realloc(block1, nb * BLK_SIZE_MIN);
		if (block1 == NULL) {
			/* On failure k_realloc() leaves the old block
			 * intact — keep the last valid pointer.
			 */
			block1 = last_block1;
			break;
		}
	}

	/* For boards whose subsystems use the heap and leave holes, deplete
	 * remaining memory using k_malloc
	 */
	void *holes[BLK_NUM_MAX * 2];

	for (int i = 0; i < (BLK_NUM_MAX * 2); i++) {
		holes[i] = k_malloc(BLK_SIZE_MIN);
		if (holes[i] == NULL) {
			break;
		}
	}

	/* Allocate buffer2 when the heap has been depleted */
	block2 = k_realloc(NULL, BLK_SIZE_MIN);
	/* Should fail and return NULL */
	zassert_is_null(block2);

	/* Now, make block1 smaller */
	block1 = k_realloc(block1, BLK_SIZE_MIN);
	zassert_not_null(block1);

	/* Try to allocate buffer2 again */
	block2 = k_realloc(NULL, BLK_SIZE_MIN);
	/* Should pass this time */
	zassert_not_null(block2);

	/* Deallocate everything */
	k_free(block1);
	/* equivalent to k_free() */
	block2 = k_realloc(block2, 0);
	/* Return NULL after freed */
	zassert_is_null(block2);

	/* After all allocated buffers have been freed, make sure that we are able to allocate as
	 * many again
	 */
	block1 = k_malloc(BLK_SIZE_MIN);
	zassert_not_null(block1);
	for (size_t i = 1; i < nb; i++) {
		block1 = k_realloc(block1, i * BLK_SIZE_MIN);
		zassert_not_null(block1);
	}

	/* Free block1 with k_realloc() this time */
	block1 = k_realloc(block1, 0);
	zassert_is_null(block1);

	/* Free holes */
	for (int i = 0; i < (BLK_NUM_MAX * 2); i++) {
		if (holes[i] == NULL) {
			break;
		}
		k_free(holes[i]);
	}
}
177 
178 /*test cases*/
179 
180 /**
181  * @brief Test to demonstrate k_malloc() and k_free() API usage
182  *
183  * @ingroup kernel_heap_tests
184  *
 * @details The test allocates 4 blocks from the heap memory pool
 * using the k_malloc() API. It also tries to allocate a block of size
 * 64 bytes, which fails because all the memory has been used up. It then
 * validates the k_free() API by freeing up all the blocks which were
 * allocated from the heap memory.
190  *
191  * @see k_malloc()
192  */
ZTEST(mheap_api,test_mheap_malloc_free)193 ZTEST(mheap_api, test_mheap_malloc_free)
194 {
195 	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
196 		return;
197 	}
198 
199 	k_tid_t tid = k_thread_create(&malloc_free_tdata, malloc_free_tstack,
200 				 INCREMENTAL_FILL_STACK_SIZE,
201 				 malloc_free_handler,
202 				 NULL, NULL, NULL,
203 				 K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
204 
205 	k_thread_join(tid, K_FOREVER);
206 }
207 
208 
209 
ZTEST(mheap_api,test_mheap_realloc)210 ZTEST(mheap_api, test_mheap_realloc)
211 {
212 	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
213 		return;
214 	}
215 
216 	k_tid_t tid = k_thread_create(&realloc_tdata, realloc_tstack,
217 				 INCREMENTAL_FILL_STACK_SIZE,
218 				 realloc_handler,
219 				 NULL, NULL, NULL,
220 				 K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
221 
222 	k_thread_join(tid, K_FOREVER);
223 }
224 
225 /**
226  * @brief Test to demonstrate k_calloc() API functionality.
227  *
228  * @ingroup kernel_heap_tests
229  *
230  * @details The test validates k_calloc() API. When requesting a
231  * huge size of space or a space larger than heap memory,
232  * the API will return NULL. The 8 blocks of memory of
233  * size 16 bytes are allocated by k_calloc() API. When allocated using
234  * k_calloc() the memory buffers have to be zeroed. Check is done, if the
 * blocks are memset to 0 and read/write is allowed. The test is then
 * torn down by freeing all the blocks allocated.
237  *
238  * @see k_calloc()
239  */
ZTEST(mheap_api,test_mheap_calloc)240 ZTEST(mheap_api, test_mheap_calloc)
241 {
242 	char *mem;
243 
244 	/* Requesting a huge size to validate overflow */
245 	mem = k_calloc(NMEMB, OVERFLOW_SIZE);
246 	zassert_is_null(mem, "calloc operation failed");
247 
248 	/* Requesting a space large than heap memory lead to failure */
249 	mem = k_calloc(NMEMB * 3, SIZE);
250 	zassert_is_null(mem, "calloc operation failed");
251 
252 	mem = k_calloc(NMEMB, SIZE);
253 	zassert_not_null(mem, "calloc operation failed");
254 
255 	/* Memory should be zeroed and not crash us if we read/write to it */
256 	for (int i = 0; i < BOUNDS; i++) {
257 		zassert_equal(mem[i], 0);
258 		mem[i] = 1;
259 	}
260 
261 	k_free(mem);
262 }
263 
ZTEST(mheap_api,test_k_aligned_alloc)264 ZTEST(mheap_api, test_k_aligned_alloc)
265 {
266 	void *r;
267 
268 	/*
269 	 * Allow sizes that are not necessarily a multiple of the
270 	 * alignment. The backing allocator would naturally round up to
271 	 * some minimal block size. This would make k_aligned_alloc()
272 	 * more like posix_memalign() instead of aligned_alloc(), but
273 	 * the benefit is that k_malloc() can then just be a wrapper
274 	 * around k_aligned_alloc().
275 	 */
276 	r = k_aligned_alloc(sizeof(void *), 1);
277 	/* allocation succeeds */
278 	zassert_not_equal(NULL, r, "aligned alloc of 1 byte failed");
279 	/* r is suitably aligned */
280 	zassert_equal(0, (uintptr_t)r % sizeof(void *),
281 		"%p not %u-byte-aligned",
282 		r, sizeof(void *));
283 	k_free(r);
284 
285 	/* allocate with > 8 byte alignment */
286 	r = k_aligned_alloc(16, 1);
287 	/* allocation succeeds */
288 	zassert_not_equal(NULL, r, "16-byte-aligned alloc failed");
289 	/* r is suitably aligned */
290 	zassert_equal(0, (uintptr_t)r % 16,
291 		"%p not 16-byte-aligned", r);
292 	k_free(r);
293 }
294 
295 /**
 * @brief Validate allocation and free from system heap memory pool.
 *
 * @details Set heap memory as the resource pool. Allocation will succeed
 * for a block of memory smaller than the pool and will fail for
 * a block of memory larger than the pool.
301  *
302  * @ingroup kernel_heap_tests
303  *
304  * @see k_thread_system_pool_assign(), z_thread_malloc(), k_free()
305  */
ZTEST(mheap_api,test_sys_heap_mem_pool_assign)306 ZTEST(mheap_api, test_sys_heap_mem_pool_assign)
307 {
308 	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
309 		return;
310 	}
311 
312 	void *ptr;
313 
314 	k_thread_system_pool_assign(k_current_get());
315 	ptr = (char *)z_thread_malloc(BLK_SIZE_MIN/2);
316 	zassert_not_null(ptr, "bytes allocation failed from system pool");
317 	k_free(ptr);
318 
319 	zassert_is_null((char *)z_thread_malloc(K_HEAP_MEM_POOL_SIZE * 2),
320 						"overflow check failed");
321 }
322 
323 /**
324  * @brief Validate allocation and free from system heap memory pool
325  * in isr context.
326  *
327  * @details When in isr context, the kernel will successfully alloc a block of
328  * memory because in this situation, the kernel will assign the heap memory
329  * as resource pool.
330  *
331  * @ingroup kernel_heap_tests
332  *
333  * @see z_thread_malloc(), k_free()
334  */
ZTEST(mheap_api,test_malloc_in_isr)335 ZTEST(mheap_api, test_malloc_in_isr)
336 {
337 	if (!IS_ENABLED(CONFIG_IRQ_OFFLOAD)) {
338 		return;
339 	}
340 
341 	irq_offload((irq_offload_routine_t)tIsr_malloc_and_free, NULL);
342 }
343 
344 /**
345  * @brief Validate allocation and free failure when thread's resource pool
346  * is not assigned.
347  *
348  * @details When a thread's resource pool is not assigned, alloc memory will fail.
349  *
350  * @ingroup kernel_heap_tests
351  *
352  * @see z_thread_malloc()
353  */
ZTEST(mheap_api,test_malloc_in_thread)354 ZTEST(mheap_api, test_malloc_in_thread)
355 {
356 	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
357 		return;
358 	}
359 
360 	k_tid_t tid = k_thread_create(&malloc_in_thread_tdata, malloc_in_thread_tstack,
361 				 MALLOC_IN_THREAD_STACK_SIZE, malloc_in_thread_handler,
362 				 NULL, NULL, NULL,
363 				 0, 0, K_NO_WAIT);
364 
365 	k_sem_take(&malloc_in_thread_sem, K_FOREVER);
366 
367 	k_thread_abort(tid);
368 }
369 
/* Multi-heap chooser callback: cfg carries the index of the backing
 * heap, encoded as a pointer by the caller.
 */
void *multi_heap_choice(struct sys_multi_heap *mheap, void *cfg,
			size_t align, size_t size)
{
	/* Recover the index via uintptr_t, the portable pointer<->integer
	 * round-trip type, instead of the narrowing (int)(long) double cast.
	 */
	struct sys_heap *h = &mheaps[(uintptr_t)cfg];

	return sys_heap_aligned_alloc(h, align, size);
}
377 
/* End-to-end exercise of the sys_multi_heap API over N_MULTI_HEAPS
 * independent backing heaps: targeted alloc, in-place realloc, full
 * depletion, free, re-allocation, shrink, and the realloc(ptr, 0) /
 * realloc(NULL, n) special cases.
 * NOTE: statement order is load-bearing — each step depends on the
 * exact fill state left by the previous one.
 */
ZTEST(mheap_api, test_multi_heap)
{
	char *blocks[N_MULTI_HEAPS];

	sys_multi_heap_init(&multi_heap, multi_heap_choice);
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		sys_heap_init(&mheaps[i], &heap_mem[i][0], MHEAP_BYTES);
		sys_multi_heap_add_heap(&multi_heap, &mheaps[i], NULL);
	}

	/* Allocate half the buffer from each heap, make sure it works
	 * and that the pointer is in the correct memory
	 */
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		/* cfg (the heap index) is smuggled through the void* arg */
		blocks[i] = sys_multi_heap_alloc(&multi_heap, (void *)(long)i,
						 MHEAP_BYTES / 2);

		zassert_not_null(blocks[i], "allocation failed");
		zassert_true(blocks[i] >= &heap_mem[i][0] &&
			     blocks[i] < &heap_mem[i+1][0],
			     "allocation not in correct heap");

		/* Same-size realloc must not move the block */
		void *ptr = sys_multi_heap_realloc(&multi_heap, (void *)(long)i,
			blocks[i], MHEAP_BYTES / 2);

		zassert_equal(ptr, blocks[i], "realloc moved pointer");
	}

	/* Make sure all heaps fail to allocate another */
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		void *b = sys_multi_heap_alloc(&multi_heap, (void *)(long)i,
					       MHEAP_BYTES / 2);

		zassert_is_null(b, "second allocation succeeded?");
	}

	/* Free all blocks */
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		sys_multi_heap_free(&multi_heap, blocks[i]);
	}

	/* Allocate again to make sure they're still valid */
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		blocks[i] = sys_multi_heap_alloc(&multi_heap, (void *)(long)i,
						 MHEAP_BYTES / 2);
		zassert_not_null(blocks[i], "final re-allocation failed");

		/* Allocating smaller buffer should stay within */
		void *ptr = sys_multi_heap_realloc(&multi_heap, (void *)(long)i,
						   blocks[i], MHEAP_BYTES / 4);
		zassert_equal(ptr, blocks[i], "realloc should return same value");

		/* The shrink above must have released the upper quarter;
		 * a new allocation should land right after the block.
		 */
		ptr = sys_multi_heap_alloc(&multi_heap, (void *)(long)i,
					   MHEAP_BYTES / 4);
		zassert_between_inclusive((uintptr_t)ptr, (uintptr_t)blocks[i] + MHEAP_BYTES / 4,
			(uintptr_t)blocks[i] + MHEAP_BYTES / 2 - 1,
			"realloc failed to shrink prev buffer");
	}

	/* Test realloc special cases */
	void *ptr = sys_multi_heap_realloc(&multi_heap, (void *)0L,
		blocks[0], /* size = */ 0);
	zassert_is_null(ptr);

	ptr = sys_multi_heap_realloc(&multi_heap, (void *)0L,
		/* ptr = */ NULL, MHEAP_BYTES / 4);
	zassert_not_null(ptr);
}
446