1 /*
2 * Copyright (c) 2016 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/ztest.h>
8 #include <kernel_internal.h>
9 #include <zephyr/irq_offload.h>
10 #include <zephyr/sys/multi_heap.h>
11 #include "test_mheap.h"
12
13 #define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
14 #define OVERFLOW_SIZE SIZE_MAX
15
16 #define NMEMB 8
17 #define SIZE 16
18 #define BOUNDS (NMEMB * SIZE)
19
20 #define N_MULTI_HEAPS 4
21 #define MHEAP_BYTES 128
22
23 static struct sys_multi_heap multi_heap;
24 static char heap_mem[N_MULTI_HEAPS][MHEAP_BYTES];
25 static struct sys_heap mheaps[N_MULTI_HEAPS];
26
27 K_SEM_DEFINE(thread_sem, 0, 1);
28 K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
29 struct k_thread tdata;
30
tIsr_malloc_and_free(void * data)31 static void tIsr_malloc_and_free(void *data)
32 {
33 ARG_UNUSED(data);
34 void *ptr;
35
36 ptr = (char *)z_thread_malloc(BLK_SIZE_MIN);
37 zassert_not_null(ptr, "bytes allocation failed from system pool");
38 k_free(ptr);
39 }
40
thread_entry(void * p1,void * p2,void * p3)41 static void thread_entry(void *p1, void *p2, void *p3)
42 {
43 void *ptr;
44
45 k_current_get()->resource_pool = NULL;
46
47 ptr = (char *)z_thread_malloc(BLK_SIZE_MIN);
48 zassert_is_null(ptr, "bytes allocation failed from system pool");
49
50 k_sem_give(&thread_sem);
51 }
52
53 /*test cases*/
54
55 /**
56 * @brief Test to demonstrate k_malloc() and k_free() API usage
57 *
58 * @ingroup kernel_heap_tests
59 *
 * @details The test allocates minimum-size blocks from the heap memory
 * pool using the k_malloc() API until the pool is exhausted. A further
 * allocation attempt then fails and returns NULL. The test then
 * validates the k_free() API by freeing all of the blocks that were
 * allocated from the heap memory.
65 *
66 * @see k_malloc()
67 */
ZTEST(mheap_api,test_mheap_malloc_free)68 ZTEST(mheap_api, test_mheap_malloc_free)
69 {
70 void *block[2 * BLK_NUM_MAX], *block_fail;
71 int nb;
72
73 for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
74 /**
75 * TESTPOINT: This routine provides traditional malloc()
76 * semantics. Memory is allocated from the heap memory pool.
77 */
78 block[nb] = k_malloc(BLK_SIZE_MIN);
79 if (block[nb] == NULL) {
80 break;
81 }
82 }
83
84 block_fail = k_malloc(BLK_SIZE_MIN);
85 /** TESTPOINT: Return NULL if fail.*/
86 zassert_is_null(block_fail, NULL);
87
88 for (int i = 0; i < nb; i++) {
89 /**
90 * TESTPOINT: This routine provides traditional free()
91 * semantics. The memory being returned must have been allocated
92 * from the heap memory pool.
93 */
94 k_free(block[i]);
95 }
96 /** TESTPOINT: If ptr is NULL, no operation is performed.*/
97 k_free(NULL);
98
99 /** TESTPOINT: Return NULL if fail.*/
100 block_fail = k_malloc(OVERFLOW_SIZE);
101 zassert_is_null(block_fail, NULL);
102 }
103
/* Exercise k_realloc(): malloc-equivalence with NULL, growth until the
 * heap is depleted, shrinking to release space, and free-equivalence
 * with size 0. The exact sequencing matters: later steps depend on the
 * heap state left by earlier ones.
 */
ZTEST(mheap_api, test_mheap_realloc)
{
	void *block1, *block2;
	size_t nb;

	/* Realloc NULL pointer is equivalent to malloc() */
	block1 = k_realloc(NULL, BLK_SIZE_MIN);
	zassert_not_null(block1);

	/* Allocate something larger than the heap */
	block2 = k_realloc(NULL, OVERFLOW_SIZE);
	/* Should fail and return NULL */
	zassert_is_null(block2);

	/* Keep making the allocated buffer bigger until the heap is depleted */
	for (nb = 2; nb < (2 * BLK_NUM_MAX); nb++) {
		void *last_block1 = block1;

		block1 = k_realloc(block1, nb * BLK_SIZE_MIN);
		if (block1 == NULL) {
			/* Grow failed: k_realloc() leaves the previous
			 * allocation intact, so restore the last valid
			 * pointer and stop growing.
			 */
			block1 = last_block1;
			break;
		}
	}

	/* Allocate buffer2 when the heap has been depleted */
	block2 = k_realloc(NULL, BLK_SIZE_MIN);
	/* Should fail and return NULL */
	zassert_is_null(block2);

	/* Now, make block1 smaller */
	block1 = k_realloc(block1, BLK_SIZE_MIN);
	zassert_not_null(block1);

	/* Try to allocate buffer2 again */
	block2 = k_realloc(NULL, BLK_SIZE_MIN);
	/* Should pass this time */
	zassert_not_null(block2);

	/* Deallocate everything */
	k_free(block1);
	/* equivalent to k_free() */
	block2 = k_realloc(block2, 0);
	/* Return NULL after freed */
	zassert_is_null(block2);

	/* After all allocated buffers have been freed, make sure that we are able to allocate as
	 * many again
	 */
	block1 = k_malloc(BLK_SIZE_MIN);
	zassert_not_null(block1);
	/* nb retains the iteration count at which growth first failed
	 * (or the loop bound), so this re-grows to the same high-water mark.
	 */
	for (size_t i = 1; i < nb; i++) {
		block1 = k_realloc(block1, i * BLK_SIZE_MIN);
		zassert_not_null(block1);
	}

	/* Free block1 with k_realloc() this time */
	block1 = k_realloc(block1, 0);
	zassert_is_null(block1);
}
164
165 /**
166 * @brief Test to demonstrate k_calloc() API functionality.
167 *
168 * @ingroup kernel_heap_tests
169 *
 * @details The test validates the k_calloc() API. When requesting a
 * huge amount of space, or more space than the heap contains,
 * the API returns NULL. Eight blocks of memory of
 * size 16 bytes are then allocated by the k_calloc() API. Memory
 * allocated with k_calloc() must be zeroed; a check verifies that the
 * blocks are zero-filled and that read/write access is allowed. The
 * test is then torn down by freeing all the blocks allocated.
177 *
178 * @see k_calloc()
179 */
ZTEST(mheap_api,test_mheap_calloc)180 ZTEST(mheap_api, test_mheap_calloc)
181 {
182 char *mem;
183
184 /* Requesting a huge size to validate overflow */
185 mem = k_calloc(NMEMB, OVERFLOW_SIZE);
186 zassert_is_null(mem, "calloc operation failed");
187
188 /* Requesting a space large than heap memory lead to failure */
189 mem = k_calloc(NMEMB * 3, SIZE);
190 zassert_is_null(mem, "calloc operation failed");
191
192 mem = k_calloc(NMEMB, SIZE);
193 zassert_not_null(mem, "calloc operation failed");
194
195 /* Memory should be zeroed and not crash us if we read/write to it */
196 for (int i = 0; i < BOUNDS; i++) {
197 zassert_equal(mem[i], 0);
198 mem[i] = 1;
199 }
200
201 k_free(mem);
202 }
203
ZTEST(mheap_api,test_k_aligned_alloc)204 ZTEST(mheap_api, test_k_aligned_alloc)
205 {
206 void *r;
207
208 /*
209 * Allow sizes that are not necessarily a multiple of the
210 * alignment. The backing allocator would naturally round up to
211 * some minimal block size. This would make k_aligned_alloc()
212 * more like posix_memalign() instead of aligned_alloc(), but
213 * the benefit is that k_malloc() can then just be a wrapper
214 * around k_aligned_alloc().
215 */
216 r = k_aligned_alloc(sizeof(void *), 1);
217 /* allocation succeeds */
218 zassert_not_equal(NULL, r, "aligned alloc of 1 byte failed");
219 /* r is suitably aligned */
220 zassert_equal(0, (uintptr_t)r % sizeof(void *),
221 "%p not %u-byte-aligned",
222 r, sizeof(void *));
223 k_free(r);
224
225 /* allocate with > 8 byte alignment */
226 r = k_aligned_alloc(16, 1);
227 /* allocation succeeds */
228 zassert_not_equal(NULL, r, "16-byte-aligned alloc failed");
229 /* r is suitably aligned */
230 zassert_equal(0, (uintptr_t)r % 16,
231 "%p not 16-byte-aligned", r);
232 k_free(r);
233 }
234
235 /**
 * @brief Validate allocation and free from system heap memory pool.
 *
 * @details Set the system heap as the thread's resource pool. Allocating
 * a block of memory smaller than the pool succeeds, while allocating
 * a block of memory larger than the pool fails.
241 *
242 * @ingroup kernel_heap_tests
243 *
244 * @see k_thread_system_pool_assign(), z_thread_malloc(), k_free()
245 */
ZTEST(mheap_api,test_sys_heap_mem_pool_assign)246 ZTEST(mheap_api, test_sys_heap_mem_pool_assign)
247 {
248 if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
249 return;
250 }
251
252 void *ptr;
253
254 k_thread_system_pool_assign(k_current_get());
255 ptr = (char *)z_thread_malloc(BLK_SIZE_MIN/2);
256 zassert_not_null(ptr, "bytes allocation failed from system pool");
257 k_free(ptr);
258
259 zassert_is_null((char *)z_thread_malloc(BLK_SIZE_MAX * 2),
260 "overflow check failed");
261 }
262
263 /**
264 * @brief Validate allocation and free from system heap memory pool
265 * in isr context.
266 *
267 * @details When in isr context, the kernel will successfully alloc a block of
268 * memory because in this situation, the kernel will assign the heap memory
269 * as resource pool.
270 *
271 * @ingroup kernel_heap_tests
272 *
273 * @see z_thread_malloc(), k_free()
274 */
/* Run the malloc-and-free handler in ISR context via irq_offload(). */
ZTEST(mheap_api, test_malloc_in_isr)
{
	/* Skip on platforms without IRQ offload support. */
	if (!IS_ENABLED(CONFIG_IRQ_OFFLOAD)) {
		return;
	}

	/* NOTE(review): the cast adapts tIsr_malloc_and_free's (void *)
	 * parameter to irq_offload_routine_t — presumably the routine type
	 * takes (const void *); confirm against the irq_offload() prototype.
	 */
	irq_offload((irq_offload_routine_t)tIsr_malloc_and_free, NULL);
}
283
284 /**
285 * @brief Validate allocation and free failure when thread's resource pool
286 * is not assigned.
287 *
288 * @details When a thread's resource pool is not assigned, alloc memory will fail.
289 *
290 * @ingroup kernel_heap_tests
291 *
292 * @see z_thread_malloc()
293 */
ZTEST(mheap_api,test_malloc_in_thread)294 ZTEST(mheap_api, test_malloc_in_thread)
295 {
296 if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
297 return;
298 }
299
300 k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
301 thread_entry, NULL, NULL, NULL,
302 0, 0, K_NO_WAIT);
303
304 k_sem_take(&thread_sem, K_FOREVER);
305
306 k_thread_abort(tid);
307 }
308
/* Chooser callback for the multi-heap: cfg carries the target heap index
 * encoded as a pointer value.
 *
 * @param mheap  multi-heap instance (unused; selection is by cfg only)
 * @param cfg    heap index smuggled through the opaque config pointer
 * @param align  requested alignment, forwarded to the backing heap
 * @param size   requested size, forwarded to the backing heap
 * @return block from the selected heap, or NULL on failure
 */
void *multi_heap_choice(struct sys_multi_heap *mheap, void *cfg,
			size_t align, size_t size)
{
	/* uintptr_t is the portable pointer <-> integer round-trip type;
	 * (int)(long) truncates on platforms where long is narrower than
	 * a pointer.
	 */
	struct sys_heap *h = &mheaps[(uintptr_t)cfg];

	return sys_heap_aligned_alloc(h, align, size);
}
316
/* Exercise the sys_multi_heap API over N_MULTI_HEAPS backing heaps:
 * allocation routed to the right heap, exhaustion, free, realloc-in-place,
 * and the realloc special cases (size 0, NULL pointer).
 */
ZTEST(mheap_api, test_multi_heap)
{
	char *blocks[N_MULTI_HEAPS];

	sys_multi_heap_init(&multi_heap, multi_heap_choice);
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		sys_heap_init(&mheaps[i], &heap_mem[i][0], MHEAP_BYTES);
		sys_multi_heap_add_heap(&multi_heap, &mheaps[i], NULL);
	}

	/* Allocate half the buffer from each heap, make sure it works
	 * and that the pointer is in the correct memory
	 */
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		/* cfg encodes the heap index; decoded by multi_heap_choice() */
		blocks[i] = sys_multi_heap_alloc(&multi_heap, (void *)(long)i,
						 MHEAP_BYTES / 2);

		zassert_not_null(blocks[i], "allocation failed");
		/* &heap_mem[i+1][0] is at worst one-past-the-end of the
		 * 2-D array, which is a valid comparison bound.
		 */
		zassert_true(blocks[i] >= &heap_mem[i][0] &&
			     blocks[i] < &heap_mem[i+1][0],
			     "allocation not in correct heap");

		/* Same-size realloc must not move the block */
		void *ptr = sys_multi_heap_realloc(&multi_heap, (void *)(long)i,
						   blocks[i], MHEAP_BYTES / 2);

		zassert_equal(ptr, blocks[i], "realloc moved pointer");
	}

	/* Make sure all heaps fail to allocate another */
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		void *b = sys_multi_heap_alloc(&multi_heap, (void *)(long)i,
					       MHEAP_BYTES / 2);

		zassert_is_null(b, "second allocation succeeded?");
	}

	/* Free all blocks */
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		sys_multi_heap_free(&multi_heap, blocks[i]);
	}

	/* Allocate again to make sure they're still valid */
	for (int i = 0; i < N_MULTI_HEAPS; i++) {
		blocks[i] = sys_multi_heap_alloc(&multi_heap, (void *)(long)i,
						 MHEAP_BYTES / 2);
		zassert_not_null(blocks[i], "final re-allocation failed");

		/* Allocating smaller buffer should stay within */
		void *ptr = sys_multi_heap_realloc(&multi_heap, (void *)(long)i,
						   blocks[i], MHEAP_BYTES / 4);
		zassert_equal(ptr, blocks[i], "realloc should return same value");

		/* The space released by the shrink above should now hold
		 * this new quarter-size allocation.
		 */
		ptr = sys_multi_heap_alloc(&multi_heap, (void *)(long)i,
					   MHEAP_BYTES / 4);
		zassert_between_inclusive((uintptr_t)ptr, (uintptr_t)blocks[i] + MHEAP_BYTES / 4,
					  (uintptr_t)blocks[i] + MHEAP_BYTES / 2 - 1,
					  "realloc failed to shrink prev buffer");
	}

	/* Test realloc special cases */
	/* size 0 is equivalent to free(): returns NULL */
	void *ptr = sys_multi_heap_realloc(&multi_heap, (void *)0L,
					   blocks[0], /* size = */ 0);
	zassert_is_null(ptr);

	/* NULL pointer is equivalent to malloc() */
	ptr = sys_multi_heap_realloc(&multi_heap, (void *)0L,
				     /* ptr = */ NULL, MHEAP_BYTES / 4);
	zassert_not_null(ptr);
}
385