/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/irq_offload.h>
#include <string.h>	/* memset() */

#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define STACK_LEN 4
#define HIGH_T1		0xaaa
#define HIGH_T2		0xbbb
#define LOW_PRIO	0xccc

/**TESTPOINT: init via K_STACK_DEFINE*/
K_STACK_DEFINE(kstack, STACK_LEN);
K_STACK_DEFINE(kstack_test_alloc, STACK_LEN);
struct k_stack stack;

K_THREAD_STACK_DEFINE(threadstack1, STACK_SIZE);
struct k_thread thread_data1;
K_THREAD_STACK_DEFINE(threadstack_t1, STACK_SIZE);
static struct k_thread high_pro_thread_t1;
K_THREAD_STACK_DEFINE(threadstack_t2, STACK_SIZE);
static struct k_thread high_pro_thread_t2;
static ZTEST_DMEM stack_data_t data[STACK_LEN] = { 0xABCD, 0x1234 };
struct k_sem end_sema1;

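/*
 * Helpers: push all STACK_LEN entries of data[] onto the stack in
 * order, or pop them back and verify LIFO ordering against data[].
 */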
static void tstack_push(struct k_stack *pstack)
{
	for (int i = 0; i < STACK_LEN; i++) {
		/**TESTPOINT: stack push*/
		k_stack_push(pstack, data[i]);
	}
}

static void tstack_pop(struct k_stack *pstack)
{
	stack_data_t rx_data;

	for (int i = STACK_LEN - 1; i >= 0; i--) {
		/**TESTPOINT: stack pop*/
		zassert_false(k_stack_pop(pstack, &rx_data, K_NO_WAIT));
		zassert_equal(rx_data, data[i]);
	}
}

/* Entry points for the offloaded ISR context */
static void tIsr_entry_push(const void *p)
{
	tstack_push((struct k_stack *)p);
}

static void tIsr_entry_pop(const void *p)
{
	tstack_pop((struct k_stack *)p);
}

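/*
 * Child thread of the thread-to-thread tests: pop the items pushed by
 * the parent, signal completion, then push them back and signal again.
 * end_sema1 sequences the two phases against the parent thread.
 */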
static void tThread_entry(void *p1, void *p2, void *p3)
{
	tstack_pop((struct k_stack *)p1);
	k_sem_give(&end_sema1);
	tstack_push((struct k_stack *)p1);
	k_sem_give(&end_sema1);
}

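/*
 * Parent side of the thread-to-thread tests: spawn the child, push
 * data for it to pop, then pop and verify the data it pushes back.
 */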
static void tstack_thread_thread(struct k_stack *pstack)
{
	k_sem_init(&end_sema1, 0, 1);
	/**TESTPOINT: thread-thread data passing via stack*/
	k_tid_t tid = k_thread_create(&thread_data1, threadstack1, STACK_SIZE,
				      tThread_entry, pstack, NULL, NULL,
				      K_PRIO_PREEMPT(0), K_USER |
				      K_INHERIT_PERMS, K_NO_WAIT);
	tstack_push(pstack);
	k_sem_take(&end_sema1, K_FOREVER);

	k_sem_take(&end_sema1, K_FOREVER);
	tstack_pop(pstack);

	/* Abort the spawned thread to avoid side effects */
	k_thread_abort(tid);
}

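/*
 * Thread/ISR exchange in both directions: an offloaded IRQ handler
 * pushes while the thread pops, then the thread pushes and the
 * handler pops.
 */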
static void tstack_thread_isr(struct k_stack *pstack)
{
	k_sem_init(&end_sema1, 0, 1);
	/**TESTPOINT: thread-isr data passing via stack*/
	irq_offload(tIsr_entry_push, (const void *)pstack);
	tstack_pop(pstack);

	tstack_push(pstack);
	irq_offload(tIsr_entry_pop, (const void *)pstack);
}

/**
 * @addtogroup kernel_stack_tests
 * @{
 */

/**
 * @brief Test data passing between threads via stack
 *
 * @details Initialize one statically defined and one runtime-initialized
 * stack. The current thread pushes data items onto the stack while a newly
 * created thread pops them, then the roles are reversed, with a semaphore
 * sequencing the two phases. This verifies data passing between threads
 * via a stack, and that a stack can be defined at compile time.
 *
 * @ingroup kernel_stack_tests
 *
 * @see k_stack_init(), k_stack_push(), #K_STACK_DEFINE(x), k_stack_pop()
 */
ZTEST(stack_contexts, test_stack_thread2thread)
{
	/**TESTPOINT: test k_stack_init stack*/
	k_stack_init(&stack, data, STACK_LEN);
	tstack_thread_thread(&stack);

	/**TESTPOINT: test K_STACK_DEFINE stack*/
	tstack_thread_thread(&kstack);
}

#ifdef CONFIG_USERSPACE
/**
 * @brief Verifies data passing between user threads via stack
 * @see k_stack_init(), k_stack_push(), #K_STACK_DEFINE(x), k_stack_pop()
 */
ZTEST_USER(stack_contexts, test_stack_user_thread2thread)
{
	struct k_stack *th_stack = k_object_alloc(K_OBJ_STACK);

	zassert_not_null(th_stack, "couldn't allocate stack object");
	zassert_false(k_stack_alloc_init(th_stack, STACK_LEN),
		      "stack init failed");

	tstack_thread_thread(th_stack);
}
#endif /* CONFIG_USERSPACE */

/**
 * @brief Verifies data passing between thread and ISR via stack
 * @see k_stack_init(), k_stack_push(), #K_STACK_DEFINE(x), k_stack_pop()
 */
ZTEST(stack_contexts, test_stack_thread2isr)
{
	/**TESTPOINT: test k_stack_init stack*/
	k_stack_init(&stack, data, STACK_LEN);
	tstack_thread_isr(&stack);

	/**TESTPOINT: test K_STACK_DEFINE stack*/
	tstack_thread_isr(&kstack);
}

/**
 * @brief Verifies data passing between threads via a runtime-allocated stack
 *
 * @see k_stack_alloc_init(), k_stack_push(), #K_STACK_DEFINE(x), k_stack_pop(),
 * k_stack_cleanup()
 */
ZTEST(stack_contexts, test_stack_alloc_thread2thread)
{
	int ret;

	k_stack_alloc_init(&kstack_test_alloc, STACK_LEN);

	k_sem_init(&end_sema1, 0, 1);
	/**TESTPOINT: thread-thread data passing via stack*/
	k_tid_t tid = k_thread_create(&thread_data1, threadstack1, STACK_SIZE,
					tThread_entry, &kstack_test_alloc,
					NULL, NULL, K_PRIO_PREEMPT(0), 0,
					K_NO_WAIT);
	tstack_push(&kstack_test_alloc);
	k_sem_take(&end_sema1, K_FOREVER);

	k_sem_take(&end_sema1, K_FOREVER);
	tstack_pop(&kstack_test_alloc);

	/* Abort the spawned thread to avoid side effects */
	k_thread_abort(tid);
	k_stack_cleanup(&kstack_test_alloc);

	/* Request a buffer allocation that exceeds the test resource pool. */
	ret = k_stack_alloc_init(&kstack_test_alloc, (STACK_SIZE / 2) + 1);
	zassert_true(ret == -ENOMEM,
			"requested buffer exceeding the resource pool should fail with -ENOMEM");
}

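/*
 * Waiters for the competition test: each blocks on an empty stack with
 * K_FOREVER and asserts it was woken with the value matching its
 * expected wake-up order (highest priority first, then longest wait).
 */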
static void low_prio_wait_for_stack(void *p1, void *p2, void *p3)
{
	struct k_stack *pstack = p1;
	stack_data_t output;

	k_stack_pop(pstack, &output, K_FOREVER);
	zassert_true(output == LOW_PRIO,
	    "the low priority thread failed to get the stack data last");
}

static void high_prio_t1_wait_for_stack(void *p1, void *p2, void *p3)
{
	struct k_stack *pstack = p1;
	stack_data_t output;

	k_stack_pop(pstack, &output, K_FOREVER);
	zassert_true(output == HIGH_T1,
	    "the highest priority, longest waiting thread failed to get the stack data first");
}

static void high_prio_t2_wait_for_stack(void *p1, void *p2, void *p3)
{
	struct k_stack *pstack = p1;
	stack_data_t output;

	k_stack_pop(pstack, &output, K_FOREVER);
	zassert_true(output == HIGH_T2,
	   "the high priority, shorter waiting thread failed to get the stack data second");
}

/**
 * @brief Test multiple threads getting data from a stack
 *
 * @details Create three threads: two at a higher priority and one at a
 * lower priority, with a delay between creating the two high priority
 * threads.
 * Test points:
 * 1. Any number of threads may wait (with K_FOREVER) on an empty stack
 * simultaneously.
 * 2. When data is pushed, it is given to the highest priority
 * thread that has waited longest.
 *
 * @ingroup kernel_stack_tests
 */
ZTEST(stack_contexts, test_stack_multithread_competition)
{
	k_stack_init(&stack, data, STACK_LEN);

	int old_prio = k_thread_priority_get(k_current_get());
	int prio = 10;
	stack_data_t test_data[3];

	memset(test_data, 0, sizeof(test_data));
	k_thread_priority_set(k_current_get(), prio);

	/* Set up some values */
	test_data[0] = HIGH_T1;
	test_data[1] = HIGH_T2;
	test_data[2] = LOW_PRIO;

	k_thread_create(&thread_data1, threadstack1, STACK_SIZE,
			low_prio_wait_for_stack,
			&stack, NULL, NULL,
			prio + 4, 0, K_NO_WAIT);

	k_thread_create(&high_pro_thread_t1, threadstack_t1, STACK_SIZE,
			high_prio_t1_wait_for_stack,
			&stack, NULL, NULL,
			prio + 2, 0, K_NO_WAIT);

	/* Let thread_data1 and high_pro_thread_t1 wait longer */
	k_sleep(K_MSEC(10));

	k_thread_create(&high_pro_thread_t2, threadstack_t2, STACK_SIZE,
			high_prio_t2_wait_for_stack,
			&stack, NULL, NULL,
			prio + 2, 0, K_NO_WAIT);

	/* Give all three threads time to start and block on the stack */
	k_sleep(K_MSEC(50));

	/* Push data to wake the waiting threads */
	k_stack_push(&stack, test_data[0]);
	k_stack_push(&stack, test_data[1]);
	k_stack_push(&stack, test_data[2]);

	/* Wait for the threads to exit */
	k_thread_join(&thread_data1, K_FOREVER);
	k_thread_join(&high_pro_thread_t1, K_FOREVER);
	k_thread_join(&high_pro_thread_t2, K_FOREVER);

	/* Restore the main thread's priority */
	k_thread_priority_set(k_current_get(), old_prio);
}

/**
 * @brief Test requesting a buffer larger than the resource pool
 *
 * @details Request a stack buffer larger than the resource pool and
 * check that the expected error is returned.
 *
 * @ingroup kernel_stack_tests
 */
ZTEST(stack_contexts, test_stack_alloc_null)
{
	int ret;

	/* Request a buffer allocation that exceeds the test resource pool. */
	ret = k_stack_alloc_init(&kstack_test_alloc, (STACK_SIZE / 2) + 1);
	zassert_true(ret == -ENOMEM,
			"requested buffer exceeding the resource pool should fail with -ENOMEM");
}

/**
 * @}
 */