1 /*
2 * Copyright (c) 2021 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/ztest.h>
8 #include <zephyr/ztest_error_hook.h>
9
/* Macro declarations */
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define SEM_INIT_VAL (0U)
#define SEM_MAX_VAL (3U)
/* One more giver/taker than SEM_MAX_VAL, to exercise saturation */
#define TOTAL_MAX (4U)
#define STACK_NUMS 5
#define PRIO 5
#define LOW_PRIO 8
#define HIGH_PRIO 2

/* One stack per giver/taker thread; valid indices are 0..STACK_NUMS-1 */
static K_THREAD_STACK_ARRAY_DEFINE(multi_stack_give, STACK_NUMS, STACK_SIZE);
static K_THREAD_STACK_ARRAY_DEFINE(multi_stack_take, STACK_NUMS, STACK_SIZE);

static struct k_thread multi_tid_give[STACK_NUMS];
static struct k_thread multi_tid_take[STACK_NUMS];
/* uninit_sem is deliberately never passed to k_sem_init(); using it must fault */
static struct k_sem usage_sem, sync_sem, limit_sem, uninit_sem;
/* Shared between test thread and workers; sequenced by semaphore handshakes */
static ZTEST_DMEM int flag;
static ZTEST_DMEM atomic_t atomic_count;
28
29 /**
30 * @ingroup all_tests
31 * @{
32 * @}
33 */
34
/* User-thread entry: giving an uninitialized semaphore from user mode is
 * expected to raise a kernel fault, which the ztest error hook converts
 * into a test pass. Reaching the end of this function means no fault
 * occurred, so the test is failed explicitly.
 */
static void sem_thread_give_uninit(void *p1, void *p2, void *p3)
{
	ztest_set_fault_valid(true);

	/* use sem without initialise */
	k_sem_give(&uninit_sem);

	/* Should be unreachable: the give above must have faulted */
	ztest_test_fail();
}
44
/* User-thread entry: set the shared flag, then wake the test thread
 * pending on usage_sem.
 */
static void sem_thread_give(void *p1, void *p2, void *p3)
{
	flag = 1;
	k_sem_give(&usage_sem);
}
50
/* Low-priority taker: pends on usage_sem, records its priority in the
 * shared flag, then signals the test thread via sync_sem.
 */
static void thread_low_prio_sem_take(void *p1, void *p2, void *p3)
{
	k_sem_take(&usage_sem, K_FOREVER);

	flag = LOW_PRIO;
	k_sem_give(&sync_sem);
}
58
/* High-priority taker: same handshake as the low-priority taker; with
 * both pending on usage_sem, this one must be woken first on a give.
 */
static void thread_high_prio_sem_take(void *p1, void *p2, void *p3)
{
	k_sem_take(&usage_sem, K_FOREVER);

	flag = HIGH_PRIO;
	k_sem_give(&sync_sem);
}
66
/**
 * @brief Test semaphore usage with multiple threads
 *
 * @details Use a semaphore in several situations:
 * - Normal give/take handshake between two threads
 * - Wake order when threads of different priorities pend on one semaphore
 * - Giving an uninitialized semaphore (must fault in user mode)
 *
 * @ingroup kernel_semaphore_tests
 */
ZTEST_USER(kernel_sys_sem, test_multiple_thread_sem_usage)
{
	k_sem_init(&usage_sem, SEM_INIT_VAL, SEM_MAX_VAL);
	k_sem_init(&sync_sem, SEM_INIT_VAL, SEM_MAX_VAL);
	/* Use a semaphore to synchronize processing between threads */
	k_sem_reset(&usage_sem);
	k_thread_create(&multi_tid_give[0], multi_stack_give[0], STACK_SIZE,
			sem_thread_give, NULL, NULL,
			NULL, PRIO, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);

	/* Pend until the giver signals; the flag proves it ran first */
	k_sem_take(&usage_sem, K_FOREVER);
	zassert_equal(flag, 1, "value != 1");
	zassert_equal(k_sem_count_get(&usage_sem), 0, "sem not be took");

	k_sem_reset(&usage_sem);
	/* Use sem with different priority threads */
	k_thread_create(&multi_tid_take[0], multi_stack_take[0], STACK_SIZE,
			thread_low_prio_sem_take, NULL, NULL,
			NULL, LOW_PRIO, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);

	k_thread_create(&multi_tid_take[1], multi_stack_take[1], STACK_SIZE,
			thread_high_prio_sem_take, NULL, NULL,
			NULL, HIGH_PRIO, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);

	/* Let both takers reach their k_sem_take() before any give */
	k_sleep(K_MSEC(50));

	/* Verify the high-priority thread takes the sem first */
	k_sem_give(&usage_sem);
	k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(flag, HIGH_PRIO, "high prio value error");

	k_sem_give(&usage_sem);
	k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(flag, LOW_PRIO, "low prio value error");

	k_thread_join(&multi_tid_give[0], K_FOREVER);
	k_thread_join(&multi_tid_take[0], K_FOREVER);
	k_thread_join(&multi_tid_take[1], K_FOREVER);

	/* Giving an uninitialized semaphore from user mode must fault */
	k_thread_create(&multi_tid_give[1], multi_stack_give[1], STACK_SIZE,
			sem_thread_give_uninit, NULL, NULL,
			NULL, PRIO, K_USER | K_INHERIT_PERMS,
			K_NO_WAIT);
	k_sleep(K_MSEC(20));
	k_thread_join(&multi_tid_give[1], K_FOREVER);
}
126
multi_thread_sem_give(void * p1,void * p2,void * p3)127 static void multi_thread_sem_give(void *p1, void *p2, void *p3)
128 {
129 int count;
130
131 (void)atomic_inc(&atomic_count);
132 count = atomic_get(&atomic_count);
133 k_sem_give(&limit_sem);
134
135 if (count < TOTAL_MAX) {
136 zassert_equal(k_sem_count_get(&limit_sem), count, "multi get sem error");
137 } else {
138 zassert_equal(k_sem_count_get(&limit_sem), SEM_MAX_VAL, "count > SEM_MAX_VAL");
139 }
140
141 k_sem_take(&sync_sem, K_FOREVER);
142 }
143
multi_thread_sem_take(void * p1,void * p2,void * p3)144 static void multi_thread_sem_take(void *p1, void *p2, void *p3)
145 {
146 int count;
147
148 k_sem_take(&limit_sem, K_FOREVER);
149 (void)atomic_dec(&atomic_count);
150 count = atomic_get(&atomic_count);
151
152 if (count >= 0) {
153 zassert_equal(k_sem_count_get(&limit_sem), count, "multi take sem error");
154 } else {
155 zassert_equal(k_sem_count_get(&limit_sem), 0, "count < SEM_INIT_VAL");
156 }
157
158 k_sem_give(&sync_sem);
159 }
160
161 /**
162 * @brief Test max semaphore can be give and take with multiple thread
163 *
164 * @details
165 * - Define and initialize semaphore and thread.
166 * - Give sem by multiple threads.
167 * - Verify more than max count about semaphore can reach.
168 * - Take sem by multiple threads and verify if sem count is correct.
169 *
170 * @ingroup kernel_semaphore_tests
171 */
ZTEST_USER(kernel_sys_sem,test_multi_thread_sem_limit)172 ZTEST_USER(kernel_sys_sem, test_multi_thread_sem_limit)
173 {
174 k_sem_init(&limit_sem, SEM_INIT_VAL, SEM_MAX_VAL);
175 k_sem_init(&sync_sem, SEM_INIT_VAL, SEM_MAX_VAL);
176
177 (void)atomic_set(&atomic_count, 0);
178 for (int i = 1; i <= TOTAL_MAX; i++) {
179 k_thread_create(&multi_tid_give[i], multi_stack_give[i], STACK_SIZE,
180 multi_thread_sem_give, NULL, NULL, NULL,
181 i, K_USER | K_INHERIT_PERMS, K_NO_WAIT);
182 }
183
184 k_sleep(K_MSEC(50));
185
186 (void)atomic_set(&atomic_count, SEM_MAX_VAL);
187 for (int i = 1; i <= TOTAL_MAX; i++) {
188 k_thread_create(&multi_tid_take[i], multi_stack_take[i], STACK_SIZE,
189 multi_thread_sem_take, NULL, NULL, NULL,
190 PRIO, K_USER | K_INHERIT_PERMS, K_NO_WAIT);
191 }
192
193 /* cleanup all threads for the following test cases */
194 k_sleep(K_MSEC(50));
195 for (int i = 1; i <= TOTAL_MAX; i++) {
196 k_thread_abort(&multi_tid_give[i]);
197 k_thread_abort(&multi_tid_take[i]);
198 }
199 }
200
test_init(void)201 void *test_init(void)
202 {
203 k_thread_access_grant(k_current_get(), &usage_sem, &sync_sem, &limit_sem,
204 &multi_tid_give[0], &multi_tid_give[1],
205 &multi_tid_give[2], &multi_tid_give[3],
206 &multi_tid_give[4], &multi_tid_take[4],
207 &multi_tid_take[2], &multi_tid_take[3],
208 &multi_tid_take[0], &multi_tid_take[1],
209 &multi_tid_give[5], &multi_tid_take[5],
210 &multi_stack_take[0], &multi_stack_take[1],
211 &multi_stack_take[3], &multi_stack_take[4],
212 &multi_stack_take[2], &multi_stack_give[0],
213 &multi_stack_give[1], &multi_stack_give[2],
214 &multi_stack_give[3], &multi_stack_give[4]);
215 return NULL;
216 }
/* Register the suite: test_init runs once before the suite to grant
 * object permissions; the simple_1cpu before/after hooks wrap each test
 * (presumably restricting execution to one CPU — standard ztest helpers).
 */
ZTEST_SUITE(kernel_sys_sem, NULL, test_init,
	    ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);
219