/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/ztest.h>

#define TIMEOUT 500
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define THREAD_HIGH_PRIORITY 1
#define THREAD_MID_PRIORITY 3
#define THREAD_LOW_PRIORITY 5

/* used to pass the case type to the test threads */
static ZTEST_DMEM int case_type;
static ZTEST_DMEM int thread_ret = TC_FAIL;

/**TESTPOINT: init via K_MUTEX_DEFINE*/
K_MUTEX_DEFINE(kmutex);
static struct k_mutex tmutex;

static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
static K_THREAD_STACK_DEFINE(tstack2, STACK_SIZE);
static K_THREAD_STACK_DEFINE(tstack3, STACK_SIZE);
static struct k_thread tdata;
static struct k_thread tdata2;
static struct k_thread tdata3;

/**
 * @defgroup kernel_mutex_tests Mutexes
 * @ingroup all_tests
 * @{
 * @}
 */

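/* Helper thread entry: try to lock an already-locked mutex with K_FOREVER.
 * The thread is expected to block here until the test aborts it; the
 * assertion only fires if the lock unexpectedly succeeds.
 */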
static void tThread_entry_lock_forever(void *p1, void *p2, void *p3)
{
	zassert_false(k_mutex_lock((struct k_mutex *)p1, K_FOREVER) == 0,
		      "accessed locked resource from spawned thread");
	/* should never reach here */
}

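/* Helper thread entry: a K_NO_WAIT lock attempt on a locked mutex must
 * return immediately with a non-zero error code.
 */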
static void tThread_entry_lock_no_wait(void *p1, void *p2, void *p3)
{
	zassert_true(k_mutex_lock((struct k_mutex *)p1, K_NO_WAIT) != 0);
	TC_PRINT("bypassed locked resource from spawned thread\n");
}

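/* Helper thread entry: wait less than the time the owner holds the mutex
 * (TIMEOUT - 100 ms), so the timed lock attempt must fail.
 */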
static void tThread_entry_lock_timeout_fail(void *p1, void *p2, void *p3)
{
	zassert_true(k_mutex_lock((struct k_mutex *)p1,
				  K_MSEC(TIMEOUT - 100)) != 0, NULL);
	TC_PRINT("bypassed locked resource from spawned thread\n");
}

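/* Helper thread entry: wait longer than the time the owner holds the mutex
 * (TIMEOUT + 100 ms), so the timed lock attempt must succeed.
 */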
static void tThread_entry_lock_timeout_pass(void *p1, void *p2, void *p3)
{
	zassert_true(k_mutex_lock((struct k_mutex *)p1,
				  K_MSEC(TIMEOUT + 100)) == 0, NULL);
	TC_PRINT("accessed resource from spawned thread\n");
	k_mutex_unlock((struct k_mutex *)p1);
}

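/* Common helper: spawn a thread running entry_fn to contend for the mutex,
 * take the mutex in the main thread, and give the spawned thread TIMEOUT ms
 * to react.
 */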
static void tmutex_test_lock(struct k_mutex *pmutex,
			     void (*entry_fn)(void *, void *, void *))
{
	k_mutex_init(pmutex);
	k_thread_create(&tdata, tstack, STACK_SIZE,
			entry_fn, pmutex, NULL, NULL,
			K_PRIO_PREEMPT(0),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	zassert_true(k_mutex_lock(pmutex, K_FOREVER) == 0);
	TC_PRINT("access resource from main thread\n");

	/* wait for the spawned thread to take action */
	k_msleep(TIMEOUT);
}

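/* Like tmutex_test_lock(), but the main thread releases the mutex after
 * TIMEOUT ms and waits again, so timed lock attempts in the spawned thread
 * can run to completion.
 */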
static void tmutex_test_lock_timeout(struct k_mutex *pmutex,
				     void (*entry_fn)(void *, void *, void *))
{
	/**TESTPOINT: test k_mutex_init mutex*/
	k_mutex_init(pmutex);
	k_thread_create(&tdata, tstack, STACK_SIZE,
			entry_fn, pmutex, NULL, NULL,
			K_PRIO_PREEMPT(0),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	zassert_true(k_mutex_lock(pmutex, K_FOREVER) == 0);
	TC_PRINT("access resource from main thread\n");

	/* wait for the spawned thread to take action */
	k_msleep(TIMEOUT);
	k_mutex_unlock(pmutex);
	k_msleep(TIMEOUT);
}

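/* Lock and unlock the mutex from the current thread using K_FOREVER,
 * K_NO_WAIT and a millisecond timeout.
 */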
static void tmutex_test_lock_unlock(struct k_mutex *pmutex)
{
	k_mutex_init(pmutex);
	zassert_true(k_mutex_lock(pmutex, K_FOREVER) == 0,
		     "failed to lock with K_FOREVER");
	k_mutex_unlock(pmutex);
	zassert_true(k_mutex_lock(pmutex, K_NO_WAIT) == 0,
		     "failed to lock with K_NO_WAIT");
	k_mutex_unlock(pmutex);
	zassert_true(k_mutex_lock(pmutex, K_MSEC(TIMEOUT)) == 0,
		     "failed to lock with TIMEOUT");
	k_mutex_unlock(pmutex);
}

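/* Thread T1: takes the mutex first and checks whether the priority of the
 * thread passed in p2 (T1 itself in this test) was boosted, depending on
 * the running case_type.
 */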
static void tThread_T1_priority_inheritance(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p3);

	/* t1 will get the mutex first */
	zassert_true(k_mutex_lock((struct k_mutex *)p1, K_FOREVER) == 0,
		      "accessed locked resource from spawned thread T1");

	/* record its original priority */
	int priority_origin = k_thread_priority_get((k_tid_t)p2);

	/* wait for a while to see whether priority inheritance happened */
	k_sleep(K_MSEC(500));

	int priority = k_thread_priority_get((k_tid_t)p2);

	if (case_type == 1) {
		zassert_equal(priority, THREAD_HIGH_PRIORITY,
			"priority inheritance did not happen!");

		k_mutex_unlock((struct k_mutex *)p1);

		/* check that the priority is set back to the original one */
		priority = k_thread_priority_get((k_tid_t)p2);

		zassert_equal(priority, priority_origin,
			"priority was not restored after unlock!");
	} else if (case_type == 2) {
		zassert_equal(priority, priority_origin,
			"priority inheritance should not have happened!");

		/* wait for t2 to time out waiting for the mutex */
		k_sleep(K_MSEC(TIMEOUT));

		k_mutex_unlock((struct k_mutex *)p1);
	} else if (case_type == 3) {
		zassert_equal(priority, THREAD_HIGH_PRIORITY,
			"priority inheritance did not happen!");

		/* wait for t2 to time out waiting for the mutex */
		k_sleep(K_MSEC(TIMEOUT));

		k_mutex_unlock((struct k_mutex *)p1);
	} else {
		zassert_true(0, "should not be here!");
	}
}

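/* Thread T2: contends for the mutex held by T1. In case 1 it must get the
 * mutex; in cases 2 and 3 its timed wait must fail.
 */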
static void tThread_T2_priority_inheritance(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	if (case_type == 1) {
		zassert_true(k_mutex_lock((struct k_mutex *)p1, K_FOREVER) == 0,
		      "accessed locked resource from spawned thread T2");

		k_mutex_unlock((struct k_mutex *)p1);
	} else if (case_type == 2 || case_type == 3) {
		zassert_false(k_mutex_lock((struct k_mutex *)p1,
				K_MSEC(100)) == 0,
				"T2 should not get the resource");
	} else {
		zassert_true(0, "should not be here!");
	}
}

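/* Thread T3 in case 3: blocks until the mutex is released, then holds it
 * for TIMEOUT + 100 ms before releasing it again.
 */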
static void tThread_lock_with_time_period(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	zassert_true(k_mutex_lock((struct k_mutex *)p1, K_FOREVER) == 0,
		      "accessed locked resource from spawned thread");

	/* hold the mutex for TIMEOUT + 100 ms (600 ms), then release it */
	k_sleep(K_MSEC(TIMEOUT + 100));

	k_mutex_unlock((struct k_mutex *)p1);
}

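/* Waiter used by the recursive locking test: blocks until the mutex lock
 * count drops to zero, then records success in thread_ret and releases the
 * mutex again.
 */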
static void tThread_waiter(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	/* wait for the mutex to be fully released */
	zassert_true(k_mutex_lock((struct k_mutex *)p1, K_FOREVER) == 0,
			"Failed to get the test mutex");

	/* record that the waiter finally obtained the mutex */
	thread_ret = TC_PASS;
	k_mutex_unlock((struct k_mutex *)p1);
}

/* test cases */
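/**
 * @brief Test that a locked mutex cannot be taken by another thread
 * @details Lock the mutex in the main thread and verify that a spawned
 * thread blocks forever trying to take it, for both a k_mutex_init mutex
 * and a K_MUTEX_DEFINE mutex; the blocked thread is then aborted.
 * @ingroup kernel_mutex_tests
 */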
ZTEST_USER(mutex_api_1cpu, test_mutex_reent_lock_forever)
{
	/**TESTPOINT: test k_mutex_init mutex*/
	k_mutex_init(&tmutex);
	tmutex_test_lock(&tmutex, tThread_entry_lock_forever);
	k_thread_abort(&tdata);

	/**TESTPOINT: test K_MUTEX_DEFINE mutex*/
	tmutex_test_lock(&kmutex, tThread_entry_lock_forever);
	k_thread_abort(&tdata);
}

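/**
 * @brief Test K_NO_WAIT locking of a locked mutex
 * @details Verify that a spawned thread's K_NO_WAIT lock attempt fails
 * immediately while the main thread holds the mutex.
 * @ingroup kernel_mutex_tests
 */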
ZTEST_USER(mutex_api, test_mutex_reent_lock_no_wait)
{
	/**TESTPOINT: test k_mutex_init mutex*/
	tmutex_test_lock(&tmutex, tThread_entry_lock_no_wait);

	/**TESTPOINT: test K_MUTEX_DEFINE mutex*/
	tmutex_test_lock(&kmutex, tThread_entry_lock_no_wait);
}

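/**
 * @brief Test timed lock attempts that are expected to fail
 * @details Verify that lock attempts from a spawned thread fail while the
 * main thread still holds the mutex.
 * @ingroup kernel_mutex_tests
 */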
ZTEST_USER(mutex_api, test_mutex_reent_lock_timeout_fail)
{
	/**TESTPOINT: test k_mutex_init mutex*/
	tmutex_test_lock_timeout(&tmutex, tThread_entry_lock_timeout_fail);

	/**TESTPOINT: test K_MUTEX_DEFINE mutex*/
	tmutex_test_lock_timeout(&kmutex, tThread_entry_lock_no_wait);
}

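/**
 * @brief Test lock attempts while the mutex is held and then released
 * @details Exercise lock attempts from a spawned thread while the main
 * thread holds the mutex for TIMEOUT ms and then releases it.
 * @ingroup kernel_mutex_tests
 */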
ZTEST_USER(mutex_api_1cpu, test_mutex_reent_lock_timeout_pass)
{
	/**TESTPOINT: test k_mutex_init mutex*/
	tmutex_test_lock_timeout(&tmutex, tThread_entry_lock_timeout_pass);

	/**TESTPOINT: test K_MUTEX_DEFINE mutex*/
	tmutex_test_lock_timeout(&kmutex, tThread_entry_lock_no_wait);
}

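/**
 * @brief Test basic lock and unlock
 * @details Lock and unlock both mutexes from a single thread using
 * K_FOREVER, K_NO_WAIT and a millisecond timeout.
 * @ingroup kernel_mutex_tests
 */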
ZTEST_USER(mutex_api_1cpu, test_mutex_lock_unlock)
{
	/**TESTPOINT: test k_mutex_init mutex*/
	tmutex_test_lock_unlock(&tmutex);

	/**TESTPOINT: test K_MUTEX_DEFINE mutex*/
	tmutex_test_lock_unlock(&kmutex);
}

/**
 * @brief Test recursive mutex
 * @details Verify that locking a mutex already owned by the current thread
 * succeeds, and that waiters are unblocked only when the lock count drops
 * back to zero.
 * @ingroup kernel_mutex_tests
 */
ZTEST_USER(mutex_api, test_mutex_recursive)
{
	k_mutex_init(&tmutex);

	/**TESTPOINT: when the mutex has no owner, it cannot be unlocked */
	zassert_true(k_mutex_unlock(&tmutex) == -EINVAL,
			"fail: mutex has no owner");

	zassert_true(k_mutex_lock(&tmutex, K_NO_WAIT) == 0,
			"Failed to lock mutex");

	/**TESTPOINT: lock the mutex recursively */
	zassert_true(k_mutex_lock(&tmutex, K_NO_WAIT) == 0,
		"Failed to recursively lock mutex");

	thread_ret = TC_FAIL;
	/* Spawn a waiter thread */
	k_thread_create(&tdata3, tstack3, STACK_SIZE,
			tThread_waiter, &tmutex, NULL, NULL,
			K_PRIO_PREEMPT(12),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	zassert_true(thread_ret == TC_FAIL,
		"waiter thread should block on the recursively locked mutex");

	zassert_true(k_mutex_unlock(&tmutex) == 0, "fail to unlock");

	/**TESTPOINT: unlocking once still leaves the recursive lock held */
	zassert_true(thread_ret == TC_FAIL,
		"waiter thread should still block on the locked mutex");

	zassert_true(k_mutex_unlock(&tmutex) == 0, "fail to unlock");

	/* Give the waiter thread a chance to get the mutex */
	k_sleep(K_MSEC(1));

	/**TESTPOINT: waiter thread got the mutex */
	zassert_true(thread_ret == TC_PASS,
			"waiter thread can't take the mutex");
}

/**
 * @brief Test the mutex priority inheritance mechanism
 * @details Verify that the mutex provides priority inheritance to prevent
 * priority inversion; there are 3 cases to run.
 * Thread T1 holds the mutex first and the cases are listed below:
 * - case 1. When priority T2 > T1, priority inheritance happens.
 * - case 2. When priority T1 > T2, priority inheritance does not happen.
 * - case 3. When priority T2 > T3 > T1, priority inheritance happens but T2
 *   times out waiting and T3 gets the mutex.
 * @ingroup kernel_mutex_tests
 */
ZTEST_USER(mutex_api_1cpu, test_mutex_priority_inheritance)
{
	/**TESTPOINT: run test case 1, given priority T1 < T2 */
	k_mutex_init(&tmutex);

	/* tell the threads which case is running */
	case_type = 1;

	/* spawn a lower priority thread t1 to hold the mutex */
	k_thread_create(&tdata, tstack, STACK_SIZE,
		tThread_T1_priority_inheritance,
			&tmutex, &tdata, NULL,
			K_PRIO_PREEMPT(THREAD_LOW_PRIORITY),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/* wait for spawned thread t1 to take action */
	k_msleep(TIMEOUT);

	/**TESTPOINT: the current thread does not own the mutex */
	zassert_true(k_mutex_unlock(&tmutex) == -EPERM,
			"fail: current thread does not own the mutex");

	/* spawn a higher priority thread t2 to contend for the mutex */
	k_thread_create(&tdata2, tstack2, STACK_SIZE,
		tThread_T2_priority_inheritance,
			&tmutex, &tdata2, NULL,
			K_PRIO_PREEMPT(THREAD_HIGH_PRIORITY),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/* wait for spawned thread t2 to take action */
	k_msleep(TIMEOUT + 1000);

	/**TESTPOINT: run test case 2, given priority T1 > T2, which means
	 * priority inheritance won't happen.
	 */
	k_mutex_init(&tmutex);
	case_type = 2;

	/* spawn a higher priority thread t1 to hold the mutex */
	k_thread_create(&tdata, tstack, STACK_SIZE,
		tThread_T1_priority_inheritance,
			&tmutex, &tdata, NULL,
			K_PRIO_PREEMPT(THREAD_HIGH_PRIORITY),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/* wait for spawned thread t1 to take action */
	k_msleep(TIMEOUT);

	/* spawn a lower priority thread t2 to contend for the mutex */
	k_thread_create(&tdata2, tstack2, STACK_SIZE,
		tThread_T2_priority_inheritance,
			&tmutex, &tdata2, NULL,
			K_PRIO_PREEMPT(THREAD_LOW_PRIORITY),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/* wait for spawned thread t2 to take action */
	k_msleep(TIMEOUT + 1000);

	/**TESTPOINT: run test case 3, given priority T1 < T3 < T2, but t2
	 * does not get the mutex because its wait times out.
	 */
	k_mutex_init(&tmutex);
	case_type = 3;

	/* spawn a lower priority thread t1 to hold the mutex */
	k_thread_create(&tdata, tstack, STACK_SIZE,
		tThread_T1_priority_inheritance,
			&tmutex, &tdata, NULL,
			K_PRIO_PREEMPT(THREAD_LOW_PRIORITY),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/* wait for spawned thread t1 to take action */
	k_msleep(TIMEOUT);

	/* spawn a higher priority thread t2 to contend for the mutex */
	k_thread_create(&tdata2, tstack2, STACK_SIZE,
		tThread_T2_priority_inheritance,
			&tmutex, &tdata2, NULL,
			K_PRIO_PREEMPT(THREAD_HIGH_PRIORITY),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/* spawn a mid priority thread t3 to contend for the mutex */
	k_thread_create(&tdata3, tstack3, STACK_SIZE,
		tThread_lock_with_time_period,
			&tmutex, &tdata3, NULL,
			K_PRIO_PREEMPT(THREAD_MID_PRIORITY),
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	/* wait for spawned threads t2 and t3 to take action */
	k_msleep(TIMEOUT + 1000);
}

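/* Helper thread for the priority-inversion race test: rebuild the absolute
 * timeout from the two 32-bit halves passed via p2/p3 and expect the timed
 * lock attempt to fail with -EAGAIN.
 */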
static void tThread_mutex_lock_should_fail(void *p1, void *p2, void *p3)
{
	k_timeout_t timeout;
	struct k_mutex *mutex = (struct k_mutex *)p1;

	timeout.ticks = 0;
	timeout.ticks |= (uint64_t)(uintptr_t)p2 << 32;
	timeout.ticks |= (uint64_t)(uintptr_t)p3 << 0;

	zassert_equal(-EAGAIN, k_mutex_lock(mutex, timeout), NULL);
}

/**
 * @brief Test fix for a subtle race during priority inversion
 *
 * - A low priority thread (Tlow) locks mutex A.
 * - A high priority thread (Thigh) blocks on mutex A, boosting the priority
 *   of Tlow.
 * - Thigh times out waiting for mutex A.
 * - Before Thigh has a chance to execute, Tlow unlocks mutex A (which now
 *   has no owner) and drops its own priority.
 * - Thigh now gets a chance to execute and finds that it timed out, and
 *   then enters the block of code to lower the priority of the thread that
 *   owns mutex A (now nobody).
 * - Thigh tries to dereference the owner of mutex A (which is nobody,
 *   and thus it is NULL). This leads to an exception.
 *
 * @ingroup kernel_mutex_tests
 *
 * @see k_mutex_lock()
 */
ZTEST(mutex_api_1cpu, test_mutex_timeout_race_during_priority_inversion)
{
	k_timeout_t timeout;
	uintptr_t timeout_upper;
	uintptr_t timeout_lower;
	int helper_prio = k_thread_priority_get(k_current_get()) + 1;

	k_mutex_init(&tmutex);

	/* align to tick boundary */
	k_sleep(K_TICKS(1));

	/* allow non-kobject data to be shared (via registers) */
	timeout = K_TIMEOUT_ABS_TICKS(k_uptime_ticks()
		+ CONFIG_TEST_MUTEX_API_THREAD_CREATE_TICKS);
	timeout_upper = timeout.ticks >> 32;
	timeout_lower = timeout.ticks & BIT64_MASK(32);

	k_mutex_lock(&tmutex, K_FOREVER);
	k_thread_create(&tdata, tstack, K_THREAD_STACK_SIZEOF(tstack),
			tThread_mutex_lock_should_fail, &tmutex, (void *)timeout_upper,
			(void *)timeout_lower, helper_prio,
			K_USER | K_INHERIT_PERMS, K_NO_WAIT);

	k_thread_priority_set(k_current_get(), K_HIGHEST_THREAD_PRIO);

	k_sleep(timeout);

	k_mutex_unlock(&tmutex);
}

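/* Suite setup: when userspace is enabled, grant the main test thread (and,
 * via K_INHERIT_PERMS, the threads it spawns) access to the thread objects,
 * stacks and mutexes used by the tests.
 */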
static void *mutex_api_tests_setup(void)
{
#ifdef CONFIG_USERSPACE
	k_thread_access_grant(k_current_get(), &tdata, &tstack, &tdata2,
				&tstack2, &tdata3, &tstack3, &kmutex,
				&tmutex);
#endif
	return NULL;
}

ZTEST_SUITE(mutex_api, NULL, mutex_api_tests_setup, NULL, NULL, NULL);
ZTEST_SUITE(mutex_api_1cpu, NULL, mutex_api_tests_setup,
			ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);