/*
 * Copyright (c) 2022, Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>

#define TIMEOUT_MS 500

#define POOL_SIZE 20480

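/*
 * A stack object is larger than the usable stack size requested: the
 * Z_*_STACK_SIZE_ADJUST() macros add the arch-specific alignment and
 * reserved space for the given stack type.
 */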
#ifdef CONFIG_USERSPACE
#define STACK_OBJ_SIZE Z_THREAD_STACK_SIZE_ADJUST(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#else
#define STACK_OBJ_SIZE Z_KERNEL_STACK_SIZE_ADJUST(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif

#define MAX_HEAP_STACKS (POOL_SIZE / STACK_OBJ_SIZE)

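/* Heap that backs k_thread_stack_alloc() when stacks are allocated from the heap */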
K_HEAP_DEFINE(stack_heap, POOL_SIZE);

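/* One completion flag per thread, sized for whichever allocator yields more stacks */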
ZTEST_DMEM bool tflag[MAX(CONFIG_DYNAMIC_THREAD_POOL_SIZE, MAX_HEAP_STACKS)];

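/* Trivial thread entry point: set the flag passed via arg1 and exit */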
static void func(void *arg1, void *arg2, void *arg3)
{
	bool *flag = (bool *)arg1;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	printk("Hello, dynamic world!\n");

	*flag = true;
}

/** @brief Exercise the pool-based thread stack allocator */
ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_pool)
{
	static k_tid_t tid[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
	static struct k_thread th[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
	static k_thread_stack_t *stack[CONFIG_DYNAMIC_THREAD_POOL_SIZE];

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) {
		ztest_test_skip();
	}

	/* allocate all thread stacks from the pool */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		stack[i] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);

		zassert_not_null(stack[i]);
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
		/* ensure one more thread stack can be allocated from the heap
		 * when the pool is depleted
		 */
		zassert_ok(k_thread_stack_free(
			k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
					     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0)));
	} else {
		/* ensure that no more thread stacks can be allocated from the pool */
		zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0));
	}

	/* spawn our threads */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		tflag[i] = false;
		tid[i] = k_thread_create(&th[i], stack[i],
					 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
					 &tflag[i], NULL, NULL, 0,
					 K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	}

	/* join all threads and check that flags have been set */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		zassert_ok(k_thread_join(tid[i], K_MSEC(TIMEOUT_MS)));
		zassert_true(tflag[i]);
	}

	/* clean up stacks allocated from the pool */
	for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
		zassert_ok(k_thread_stack_free(stack[i]));
	}
}

/** @brief Exercise the heap-based thread stack allocator */
ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
{
	size_t N;
	static k_tid_t tid[MAX_HEAP_STACKS];
	static struct k_thread th[MAX_HEAP_STACKS];
	static k_thread_stack_t *stack[MAX_HEAP_STACKS];

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
		ztest_test_skip();
	}

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
		ztest_test_skip();
	}

	/* allocate as many thread stacks as possible from the heap */
	for (N = 0; N < MAX_HEAP_STACKS; ++N) {
		stack[N] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
						IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);
		if (stack[N] == NULL) {
			break;
		}
	}
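	/* N now counts the stacks that actually fit; heap metadata and
	 * alignment overhead can make this fewer than MAX_HEAP_STACKS.
	 */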

	/* spawn our threads */
	for (size_t i = 0; i < N; ++i) {
		tflag[i] = false;
		tid[i] = k_thread_create(&th[i], stack[i],
					 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
					 &tflag[i], NULL, NULL, 0,
					 K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	}

	/* join all threads and check that flags have been set */
	for (size_t i = 0; i < N; ++i) {
		zassert_ok(k_thread_join(tid[i], K_MSEC(TIMEOUT_MS)));
		zassert_true(tflag[i]);
	}

	/* clean up stacks allocated from the heap */
	for (size_t i = 0; i < N; ++i) {
		zassert_ok(k_thread_stack_free(stack[i]));
	}
}

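/*
 * Suite setup: assign stack_heap to the test thread so that dynamic
 * stack allocations on its behalf are served from that heap.
 */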
static void *dynamic_thread_stack_setup(void)
{
	k_thread_heap_assign(k_current_get(), &stack_heap);
	return NULL;
}

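/* Register the suite; dynamic_thread_stack_setup() runs once before the tests */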
ZTEST_SUITE(dynamic_thread_stack, NULL, dynamic_thread_stack_setup, NULL, NULL, NULL);