1 /*
2 * Copyright (c) 2016 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/ztest.h>
8 #include "test_mslab.h"
9
/* TESTPOINT: Statically define and initialize a memory slab*/
K_MEM_SLAB_DEFINE(kmslab, BLK_SIZE, BLK_NUM, BLK_ALIGN);
/* Backing buffer and slab object for the slab initialized at runtime
 * in mslab_setup() / test_mslab_kinit().
 */
static char __aligned(BLK_ALIGN) tslab[BLK_SIZE * BLK_NUM];
static struct k_mem_slab mslab;
/* Handshake semaphores between the test thread and helper_thread:
 * SEM_REGRESSDONE lets the helper proceed, SEM_HELPERDONE signals back.
 */
K_SEM_DEFINE(SEM_HELPERDONE, 0, 1);
K_SEM_DEFINE(SEM_REGRESSDONE, 0, 1);
/* Stack and thread object for the helper used by test_mslab_pending */
static K_THREAD_STACK_DEFINE(stack, STACKSIZE);
static struct k_thread HELPER;
18
mslab_setup(void)19 void *mslab_setup(void)
20 {
21 k_mem_slab_init(&mslab, tslab, BLK_SIZE, BLK_NUM);
22
23 return NULL;
24 }
25
tmslab_alloc_free(void * data)26 void tmslab_alloc_free(void *data)
27 {
28 struct k_mem_slab *pslab = (struct k_mem_slab *)data;
29 void *block[BLK_NUM];
30
31 (void)memset(block, 0, sizeof(block));
32 /*
33 * TESTPOINT: The memory slab's buffer contains @a slab_num_blocks
34 * memory blocks that are @a slab_block_size bytes long.
35 */
36 for (int i = 0; i < BLK_NUM; i++) {
37 /* TESTPOINT: Allocate memory from a memory slab.*/
38 /* TESTPOINT: @retval 0 Memory allocated.*/
39 zassert_true(k_mem_slab_alloc(pslab, &block[i], K_NO_WAIT) == 0,
40 NULL);
41 /*
42 * TESTPOINT: The block address area pointed at by @a mem is set
43 * to the starting address of the memory block.
44 */
45 zassert_not_null(block[i], NULL);
46 }
47 for (int i = 0; i < BLK_NUM; i++) {
48 /* TESTPOINT: Free memory allocated from a memory slab.*/
49 k_mem_slab_free(pslab, block[i]);
50 }
51 }
52
tmslab_alloc_align(void * data)53 static void tmslab_alloc_align(void *data)
54 {
55 struct k_mem_slab *pslab = (struct k_mem_slab *)data;
56 void *block[BLK_NUM];
57
58 for (int i = 0; i < BLK_NUM; i++) {
59 zassert_true(k_mem_slab_alloc(pslab, &block[i], K_NO_WAIT) == 0,
60 NULL);
61 /*
62 * TESTPOINT: To ensure that each memory block is similarly
63 * aligned to this boundary
64 */
65 zassert_true((uintptr_t)block[i] % BLK_ALIGN == 0U);
66 }
67 for (int i = 0; i < BLK_NUM; i++) {
68 k_mem_slab_free(pslab, block[i]);
69 }
70 }
71
tmslab_alloc_timeout(void * data)72 static void tmslab_alloc_timeout(void *data)
73 {
74 struct k_mem_slab *pslab = (struct k_mem_slab *)data;
75 void *block[BLK_NUM], *block_fail;
76 int64_t tms;
77 int err;
78
79 for (int i = 0; i < BLK_NUM; i++) {
80 zassert_true(k_mem_slab_alloc(pslab, &block[i], K_NO_WAIT) == 0,
81 NULL);
82 }
83
84 /* TESTPOINT: Use K_NO_WAIT to return without waiting*/
85 /* TESTPOINT: -ENOMEM Returned without waiting.*/
86 zassert_equal(k_mem_slab_alloc(pslab, &block_fail, K_NO_WAIT), -ENOMEM,
87 NULL);
88 tms = k_uptime_get();
89 err = k_mem_slab_alloc(pslab, &block_fail, K_MSEC(TIMEOUT));
90 if (IS_ENABLED(CONFIG_MULTITHREADING)) {
91 /* TESTPOINT: -EAGAIN Waiting period timed out*/
92 zassert_equal(err, -EAGAIN);
93 /*
94 * TESTPOINT: timeout Maximum time to wait for operation to
95 * complete (in milliseconds)
96 */
97 zassert_true(k_uptime_delta(&tms) >= TIMEOUT);
98 } else {
99 /* If no multithreading any timeout is treated as K_NO_WAIT */
100 zassert_equal(err, -ENOMEM);
101 zassert_true(k_uptime_delta(&tms) < TIMEOUT);
102 }
103
104 for (int i = 0; i < BLK_NUM; i++) {
105 k_mem_slab_free(pslab, block[i]);
106 }
107 }
108
tmslab_used_get(void * data)109 static void tmslab_used_get(void *data)
110 {
111 struct k_mem_slab *pslab = (struct k_mem_slab *)data;
112 void *block[BLK_NUM], *block_fail;
113
114 for (int i = 0; i < BLK_NUM; i++) {
115 zassert_true(k_mem_slab_alloc(pslab, &block[i], K_NO_WAIT) == 0,
116 NULL);
117 /* TESTPOINT: Get the number of used blocks in a memory slab.*/
118 zassert_equal(k_mem_slab_num_used_get(pslab), i + 1);
119 /*
120 * TESTPOINT: Get the number of unused blocks in a memory slab.
121 */
122 zassert_equal(k_mem_slab_num_free_get(pslab), BLK_NUM - 1 - i);
123 }
124
125 zassert_equal(k_mem_slab_alloc(pslab, &block_fail, K_NO_WAIT), -ENOMEM,
126 NULL);
127 /* free get on allocation failure*/
128 zassert_equal(k_mem_slab_num_free_get(pslab), 0);
129 /* used get on allocation failure*/
130 zassert_equal(k_mem_slab_num_used_get(pslab), BLK_NUM);
131
132 zassert_equal(k_mem_slab_alloc(pslab, &block_fail, K_MSEC(TIMEOUT)),
133 IS_ENABLED(CONFIG_MULTITHREADING) ? -EAGAIN : -ENOMEM,
134 NULL);
135 zassert_equal(k_mem_slab_num_free_get(pslab), 0);
136 zassert_equal(k_mem_slab_num_used_get(pslab), BLK_NUM);
137
138 for (int i = 0; i < BLK_NUM; i++) {
139 k_mem_slab_free(pslab, block[i]);
140 zassert_equal(k_mem_slab_num_free_get(pslab), i + 1);
141 zassert_equal(k_mem_slab_num_used_get(pslab), BLK_NUM - 1 - i);
142 }
143 }
144
/*
 * Helper thread used by test_mslab_pending.
 *
 * Runs in lock-step with the test thread through two semaphores:
 * SEM_REGRESSDONE (given by the test thread to let the helper proceed)
 * and SEM_HELPERDONE (given by the helper when a stage is complete).
 * Stage 1: exhaust kmslab. Stage 2: free one block so the test thread's
 * pending allocation can complete. Stage 3: free the remaining blocks.
 */
static void helper_thread(void *p0, void *p1, void *p2)
{
	void *ptr[BLK_NUM]; /* Pointer to memory block */

	ARG_UNUSED(p0);
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);

	(void)memset(ptr, 0, sizeof(ptr));

	/* Wait for the test thread to start stage 1 */
	k_sem_take(&SEM_REGRESSDONE, K_FOREVER);

	/* Get all blocks from the memory slab */
	for (int i = 0; i < BLK_NUM; i++) {
		/* Verify number of used blocks in the map */
		zassert_equal(k_mem_slab_num_used_get(&kmslab), i,
			      "Failed k_mem_slab_num_used_get");

		/* Get memory block */
		zassert_equal(k_mem_slab_alloc(&kmslab, &ptr[i], K_NO_WAIT), 0,
			      "Failed k_mem_slab_alloc");
	}

	/* Tell the test thread the slab is now exhausted */
	k_sem_give(&SEM_HELPERDONE);

	k_sem_take(&SEM_REGRESSDONE, K_FOREVER);
	/* Release one block; the test thread's pending alloc picks it up */
	k_mem_slab_free(&kmslab, ptr[0]);


	k_sem_take(&SEM_REGRESSDONE, K_FOREVER);

	/*
	 * Free the remaining blocks. The first block was released above
	 * and is now owned (and later freed) by the test thread.
	 */
	for (int i = 1; i < BLK_NUM; i++) {
		k_mem_slab_free(&kmslab, ptr[i]);
	}

	k_sem_give(&SEM_HELPERDONE);

} /* helper thread */
184
185 /*test cases*/
186 /**
187 * @brief Initialize the memory slab using k_mem_slab_init()
188 * and allocates/frees blocks.
189 *
190 * @details Initialize 3 memory blocks of block size 8 bytes
191 * using @see k_mem_slab_init() and check if number of used blocks
192 * is 0 and free blocks is equal to number of blocks initialized.
193 *
194 * @ingroup kernel_memory_slab_tests
195 */
ZTEST(mslab_api,test_mslab_kinit)196 ZTEST(mslab_api, test_mslab_kinit)
197 {
198 /* if a block_size is not word aligned, slab init return error */
199 zassert_equal(k_mem_slab_init(&mslab, tslab, BLK_SIZE + 1, BLK_NUM),
200 -EINVAL, NULL);
201 k_mem_slab_init(&mslab, tslab, BLK_SIZE, BLK_NUM);
202 zassert_equal(k_mem_slab_num_used_get(&mslab), 0);
203 zassert_equal(k_mem_slab_num_free_get(&mslab), BLK_NUM);
204 }
205
206 /**
207 * @brief Verify K_MEM_SLAB_DEFINE() with allocates/frees blocks.
208 *
209 * @details Initialize 3 memory blocks of block size 8 bytes
210 * using @see K_MEM_SLAB_DEFINE() and check if number of used blocks
211 * is 0 and free blocks is equal to number of blocks initialized.
212 *
213 * @ingroup kernel_memory_slab_tests
214 */
ZTEST(mslab_api,test_mslab_kdefine)215 ZTEST(mslab_api, test_mslab_kdefine)
216 {
217 zassert_equal(k_mem_slab_num_used_get(&kmslab), 0);
218 zassert_equal(k_mem_slab_num_free_get(&kmslab), BLK_NUM);
219 }
220
221 /**
222 * @brief Verify alloc and free of blocks from mem_slab
223 *
224 * @ingroup kernel_memory_slab_tests
225 */
ZTEST(mslab_api,test_mslab_alloc_free_thread)226 ZTEST(mslab_api, test_mslab_alloc_free_thread)
227 {
228
229 tmslab_alloc_free(&mslab);
230 }
231
232 /**
233 * @brief Allocate memory blocks and check for alignment of 8 bytes
234 *
235 * @details Allocate 3 blocks of memory from 2 memory slabs
236 * respectively and check if all blocks are aligned to 8 bytes
237 * and free them.
238 *
239 * @ingroup kernel_memory_slab_tests
240 */
ZTEST(mslab_api,test_mslab_alloc_align)241 ZTEST(mslab_api, test_mslab_alloc_align)
242 {
243 tmslab_alloc_align(&mslab);
244 tmslab_alloc_align(&kmslab);
245 }
246
/**
 * @brief Verify allocation of memory blocks with timeouts
 *
 * @details Allocate all memory blocks from the memory slab. Check that
 * allocating another block with K_NO_WAIT fails immediately with
 * -ENOMEM, since no blocks are left in the slab. Then record the system
 * uptime and attempt an allocation with a timeout of TIMEOUT ms; the
 * call returns -EAGAIN once the waiting period has timed out. The test
 * also confirms that the timeout really elapsed by checking the delta
 * between the allocation request and the -EAGAIN return.
 *
 * @ingroup kernel_memory_slab_tests
 */
ZTEST(mslab_api,test_mslab_alloc_timeout)263 ZTEST(mslab_api, test_mslab_alloc_timeout)
264 {
265 if (arch_num_cpus() != 1) {
266 ztest_test_skip();
267 }
268
269 tmslab_alloc_timeout(&mslab);
270 }
271
/**
 * @brief Verify count of allocated blocks
 *
 * @details Allocates all blocks one after the other, checking the used
 * and free block counts in the memory slab at every step. Once every
 * block is allocated, one more allocation is attempted and fails with
 * -ENOMEM; the same failure is checked for an allocation with a
 * timeout. Used and free counts are verified throughout via
 * @see k_mem_slab_num_used_get() and @see k_mem_slab_num_free_get().
 *
 * @ingroup kernel_memory_slab_tests
 */
ZTEST(mslab_api,test_mslab_used_get)286 ZTEST(mslab_api, test_mslab_used_get)
287 {
288 tmslab_used_get(&mslab);
289 tmslab_used_get(&kmslab);
290 }
291
/**
 * @brief Verify pending of allocating blocks
 *
 * @details First, the helper thread takes all memory blocks so that no
 * free block is left. A k_mem_slab_alloc() with a finite timeout then
 * fails and returns -EAGAIN. Next, k_mem_slab_alloc() with K_FOREVER
 * waits for a memory block until the helper thread frees one.
 *
 * @ingroup kernel_memory_slab_tests
 */
ZTEST(mslab_api, test_mslab_pending)
{
	/* The test needs a second thread; skip when multithreading is off */
	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		ztest_test_skip();
		return;
	}

	int ret_value;
	void *b; /* Pointer to memory block */

	(void)k_thread_create(&HELPER, stack, STACKSIZE,
			helper_thread, NULL, NULL, NULL,
			7, 0, K_NO_WAIT);

	k_sem_give(&SEM_REGRESSDONE); /* Allow helper thread to run */

	k_sem_take(&SEM_HELPERDONE, K_FOREVER); /* Wait for helper thread to finish */

	/* Slab is exhausted: a timed allocation must fail with -EAGAIN */
	ret_value = k_mem_slab_alloc(&kmslab, &b, K_MSEC(20));
	zassert_equal(-EAGAIN, ret_value,
		      "Failed k_mem_slab_alloc, retValue %d\n", ret_value);

	k_sem_give(&SEM_REGRESSDONE);

	/* Wait for helper thread to free a block */

	/* This call pends until the helper releases one block */
	ret_value = k_mem_slab_alloc(&kmslab, &b, K_FOREVER);
	zassert_equal(0, ret_value,
		      "Failed k_mem_slab_alloc, ret_value %d\n", ret_value);

	k_sem_give(&SEM_REGRESSDONE);

	/* Wait for helper thread to complete */
	k_sem_take(&SEM_HELPERDONE, K_FOREVER);

	/* Free memory block */
	k_mem_slab_free(&kmslab, b);
}
341