/*
 * Copyright (c) 2012-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Test kernel mutex APIs
 *
 * This module demonstrates the kernel's priority inheritance algorithm.
 * A thread that owns a mutex is promoted to the priority level of the
 * highest-priority thread attempting to lock the mutex.
 *
 * In addition, recursive locking and the use of a private mutex are tested.
 *
 * This module tests the following mutex routines:
 *
 *    sys_mutex_lock
 *    sys_mutex_unlock
 *
 * Timeline for priority inheritance testing:
 *   - 0.0  sec: thread_05, thread_06, thread_07, thread_08, thread_09 sleep
 *             : main thread takes mutex_1 then sleeps
 *   - 0.0  sec: thread_11 sleeps
 *   - 0.5  sec: thread_09 wakes and waits on mutex_1
 *   - 1.0  sec: main thread (@ priority 9) takes mutex_2 then sleeps
 *   - 1.5  sec: thread_08 wakes and waits on mutex_2
 *   - 2.0  sec: main thread (@ priority 8) takes mutex_3 then sleeps
 *   - 2.5  sec: thread_07 wakes and waits on mutex_3
 *   - 3.0  sec: main thread (@ priority 7) takes mutex_4 then sleeps
 *   - 3.5  sec: thread_05 wakes and waits on mutex_4
 *   - 3.5  sec: thread_11 wakes and waits on mutex_3
 *   - 3.75 sec: thread_06 wakes and waits on mutex_4
 *   - 4.0  sec: main thread wakes (@ priority 5) then sleeps
 *   - 4.5  sec: thread_05 times out
 *   - 5.0  sec: main thread wakes (@ priority 6) then gives mutex_4
 *             : main thread (@ priority 7) sleeps
 *   - 5.5  sec: thread_07 times out on mutex_3
 *   - 6.0  sec: main thread (@ priority 8) gives mutex_3
 *             : main thread (@ priority 8) gives mutex_2
 *             : main thread (@ priority 9) gives mutex_1
 *             : main thread (@ priority 10) sleeps
 */

#include <zephyr/tc_util.h>
#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/sys/mutex.h>

#define STACKSIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)

static ZTEST_DMEM int tc_rc = TC_PASS;         /* test case return code */

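/* Mutex used by the recursive locking portion of the test */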
ZTEST_BMEM SYS_MUTEX_DEFINE(private_mutex);

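/* Mutexes used in the priority inheritance scenario */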
ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_1);
ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_2);
ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_3);
ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_4);

#ifdef CONFIG_USERSPACE
static SYS_MUTEX_DEFINE(no_access_mutex);
#endif
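/*
 * Mutexes used to exercise the error paths: not_my_mutex is locked by the
 * setup thread so that unlocking it from another thread fails, while
 * bad_count_mutex is never locked at all.
 */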
static ZTEST_BMEM SYS_MUTEX_DEFINE(not_my_mutex);
static ZTEST_BMEM SYS_MUTEX_DEFINE(bad_count_mutex);

#ifdef CONFIG_USERSPACE
#define ZTEST_USER_OR_NOT ZTEST_USER
#else
#define ZTEST_USER_OR_NOT ZTEST
#endif

#ifdef CONFIG_USERSPACE
#define PARTICIPANT_THREAD_OPTIONS (K_USER | K_INHERIT_PERMS)
#else
#define PARTICIPANT_THREAD_OPTIONS (0)
#endif

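/*
 * Helpers to define, create (initially suspended via K_FOREVER), start and
 * join the participant threads used in the priority inheritance scenario.
 */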
#define DEFINE_PARTICIPANT_THREAD(id)                               \
		K_THREAD_STACK_DEFINE(thread_##id##_stack_area, STACKSIZE); \
		struct k_thread thread_##id##_thread_data;                  \
		k_tid_t thread_##id##_tid;

#define CREATE_PARTICIPANT_THREAD(id, pri)                                     \
		k_thread_create(&thread_##id##_thread_data, thread_##id##_stack_area,  \
			K_THREAD_STACK_SIZEOF(thread_##id##_stack_area),                   \
			thread_##id,                                                       \
			NULL, NULL, NULL,                                                  \
			pri, PARTICIPANT_THREAD_OPTIONS, K_FOREVER);
#define START_PARTICIPANT_THREAD(id) k_thread_start(&(thread_##id##_thread_data));
#define JOIN_PARTICIPANT_THREAD(id) k_thread_join(&(thread_##id##_thread_data), K_FOREVER);

/**
 * @brief thread_05 - wait on mutex_4 with a 1 second timeout
 *
 * Boosts the owner of mutex_4 (the main thread) to priority 5 and is
 * expected to time out at about the 4.5 second mark.
 */
void thread_05(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int rv;

	k_sleep(K_MSEC(3500));

	/* Wait and boost owner priority to 5 */
	rv = sys_mutex_lock(&mutex_4, K_SECONDS(1));
	if (rv != -EAGAIN) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to timeout on mutex %p\n", &mutex_4);
		return;
	}
}

/**
 * @brief thread_06 - wait on mutex_4 with a 2 second timeout
 *
 * Expected to acquire mutex_4 when the main thread releases it at about
 * the 5.0 second mark.
 */
void thread_06(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int rv;

	k_sleep(K_MSEC(3750));

	/*
	 * Wait for the mutex.  A higher priority thread is already waiting
	 * on the mutex, so this request does not immediately contribute to
	 * raising the priority of the owning thread (main thread).  When
	 * thread_05 times out, this thread becomes the highest priority
	 * waiting thread, so the priority of the owning thread (main thread)
	 * does not drop back to 7, but instead drops to 6.
	 */

	rv = sys_mutex_lock(&mutex_4, K_SECONDS(2));
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_4);
		return;
	}

	sys_mutex_unlock(&mutex_4);
}

/**
 * @brief thread_07 - wait on mutex_3 with a 3 second timeout
 *
 * Boosts the owner of mutex_3 (the main thread) to priority 7 and is
 * expected to time out at about the 5.5 second mark.
 */
void thread_07(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int rv;

	k_sleep(K_MSEC(2500));

	/*
	 * Wait and boost owner priority to 7.  While waiting, another thread
	 * of a very low priority level also waits for the mutex.  thread_07
	 * is expected to time out at about the 5.5 second mark.  When it
	 * times out, thread_11 becomes the only thread waiting on this mutex
	 * and the priority of the owning main thread drops to 8.
	 */

	rv = sys_mutex_lock(&mutex_3, K_SECONDS(3));
	if (rv != -EAGAIN) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to timeout on mutex %p\n", &mutex_3);
		return;
	}
}

/**
 * @brief thread_08 - wait on mutex_2 with no timeout
 *
 * Boosts the owner of mutex_2 (the main thread) to priority 8.
 */
void thread_08(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int rv;

	k_sleep(K_MSEC(1500));

	/* Wait and boost owner priority to 8 */
	rv = sys_mutex_lock(&mutex_2, K_FOREVER);
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_2);
		return;
	}

	sys_mutex_unlock(&mutex_2);
}

/**
 * @brief thread_09 - wait on mutex_1 with no timeout
 *
 * First verifies that a K_NO_WAIT attempt on the already locked mutex_1
 * fails with -EBUSY, then waits on it, boosting the owner (the main
 * thread) to priority 9.
 */
void thread_09(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int rv;

	k_sleep(K_MSEC(500));	/* Allow lower priority thread to run */

	/* <mutex_1> is already locked, so this attempt must not succeed */
	rv = sys_mutex_lock(&mutex_1, K_NO_WAIT);
	if (rv != -EBUSY) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to NOT take locked mutex %p\n", &mutex_1);
		return;
	}

	/* Wait and boost owner priority to 9 */
	rv = sys_mutex_lock(&mutex_1, K_FOREVER);
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_1);
		return;
	}

	sys_mutex_unlock(&mutex_1);
}

/**
 * @brief thread_11 - low priority (11) thread that waits on mutex_3
 *
 * Waits on mutex_3 with no timeout and releases it once acquired.
 */
void thread_11(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int rv;

	k_sleep(K_MSEC(3500));
	rv = sys_mutex_lock(&mutex_3, K_FOREVER);
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_3);
		return;
	}
	sys_mutex_unlock(&mutex_3);
}

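/*
 * thread_12 is declared here and defined in a separate source file; it
 * attempts to lock private_mutex during the recursive locking test.
 */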
K_THREAD_STACK_DEFINE(thread_12_stack_area, STACKSIZE);
struct k_thread thread_12_thread_data;
extern void thread_12(void *p1, void *p2, void *p3);

DEFINE_PARTICIPANT_THREAD(05);
DEFINE_PARTICIPANT_THREAD(06);
DEFINE_PARTICIPANT_THREAD(07);
DEFINE_PARTICIPANT_THREAD(08);
DEFINE_PARTICIPANT_THREAD(09);
DEFINE_PARTICIPANT_THREAD(11);

void create_participant_threads(void)
{
	CREATE_PARTICIPANT_THREAD(05, 5);
	CREATE_PARTICIPANT_THREAD(06, 6);
	CREATE_PARTICIPANT_THREAD(07, 7);
	CREATE_PARTICIPANT_THREAD(08, 8);
	CREATE_PARTICIPANT_THREAD(09, 9);
	CREATE_PARTICIPANT_THREAD(11, 11);
}

void start_participant_threads(void)
{
	START_PARTICIPANT_THREAD(05);
	START_PARTICIPANT_THREAD(06);
	START_PARTICIPANT_THREAD(07);
	START_PARTICIPANT_THREAD(08);
	START_PARTICIPANT_THREAD(09);
	START_PARTICIPANT_THREAD(11);
}

void join_participant_threads(void)
{
	JOIN_PARTICIPANT_THREAD(05);
	JOIN_PARTICIPANT_THREAD(06);
	JOIN_PARTICIPANT_THREAD(07);
	JOIN_PARTICIPANT_THREAD(08);
	JOIN_PARTICIPANT_THREAD(09);
	JOIN_PARTICIPANT_THREAD(11);
}

/**
 * @brief Main thread to test the sys_mutex_xxx interfaces
 *
 * This thread locks mutex_1, mutex_2, mutex_3 and mutex_4 in turn to
 * exercise priority inheritance. It then recursively locks private_mutex,
 * releases it, and re-locks it.
 */
ZTEST_USER_OR_NOT(mutex_complex, test_mutex)
{
	create_participant_threads();
	start_participant_threads();
	/*
	 * The main thread (test_main) priority was 10, but the ztest thread
	 * runs at priority -1. To run the test smoothly, make both the main
	 * and ztest threads run at the same priority level.
	 */
	k_thread_priority_set(k_current_get(), 10);

	int rv;
	int i;
	struct sys_mutex *mutexes[4] = { &mutex_1, &mutex_2, &mutex_3,
					 &mutex_4 };
	struct sys_mutex *givemutex[3] = { &mutex_3, &mutex_2, &mutex_1 };
	int priority[4] = { 9, 8, 7, 5 };
	int droppri[3] = { 8, 8, 9 };
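	/*
	 * priority[] holds the priority the main thread is expected to have
	 * after taking each mutex (as waiters boost it); droppri[] holds the
	 * expected priority just before each unlock in the release loop
	 * below, as the remaining waiters determine the inherited priority.
	 */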

	PRINT_LINE;

	/*
	 * 1st iteration: Take mutex_1; thread_09 waits on mutex_1
	 * 2nd iteration: Take mutex_2; thread_08 waits on mutex_2
	 * 3rd iteration: Take mutex_3; thread_07 waits on mutex_3
	 * 4th iteration: Take mutex_4; thread_05 waits on mutex_4
	 */

	for (i = 0; i < 4; i++) {
		rv = sys_mutex_lock(mutexes[i], K_NO_WAIT);
		zassert_equal(rv, 0, "Failed to lock mutex %p\n", mutexes[i]);
		k_sleep(K_SECONDS(1));

		rv = k_thread_priority_get(k_current_get());
		zassert_equal(rv, priority[i], "expected priority %d, not %d\n",
			      priority[i], rv);

		/* Catch any errors from other threads */
		zassert_equal(tc_rc, TC_PASS);
	}

	/* ~ 4 seconds have passed */

	TC_PRINT("Done LOCKING!  Current priority = %d\n",
		 k_thread_priority_get(k_current_get()));

	k_sleep(K_SECONDS(1));       /* thread_05 should time out */

	/* ~ 5 seconds have passed */

	rv = k_thread_priority_get(k_current_get());
	zassert_equal(rv, 6,
		      "thread_05 timed out, so our priority should drop: expected %d, not %d\n",
		      6, rv);

	sys_mutex_unlock(&mutex_4);
	rv = k_thread_priority_get(k_current_get());
	zassert_equal(rv, 7,
		      "Gave mutex_4, so our priority should drop: expected %d, not %d\n",
		      7, rv);

	k_sleep(K_SECONDS(1));       /* thread_07 should time out */

	/* ~ 6 seconds have passed */

	for (i = 0; i < 3; i++) {
		rv = k_thread_priority_get(k_current_get());
		zassert_equal(rv, droppri[i], "Expected priority %d, not %d\n",
			      droppri[i], rv);
		sys_mutex_unlock(givemutex[i]);

		zassert_equal(tc_rc, TC_PASS);
	}

	rv = k_thread_priority_get(k_current_get());
	zassert_equal(rv, 10, "Expected priority %d, not %d\n", 10, rv);

	k_sleep(K_SECONDS(1));     /* Give thread_11 time to run */

	zassert_equal(tc_rc, TC_PASS);

	/* test recursive locking using a private mutex */

	TC_PRINT("Testing recursive locking\n");

	rv = sys_mutex_lock(&private_mutex, K_NO_WAIT);
	zassert_equal(rv, 0, "Failed to lock private mutex");

	rv = sys_mutex_lock(&private_mutex, K_NO_WAIT);
	zassert_equal(rv, 0, "Failed to recursively lock private mutex");

	/* Start thread_12, which will block waiting for private_mutex */
	k_thread_create(&thread_12_thread_data, thread_12_stack_area, STACKSIZE,
			thread_12, NULL, NULL, NULL,
			K_PRIO_PREEMPT(12), PARTICIPANT_THREAD_OPTIONS, K_NO_WAIT);
	k_sleep(K_MSEC(5));     /* Give thread_12 a chance to block on the mutex */

	sys_mutex_unlock(&private_mutex);
	sys_mutex_unlock(&private_mutex); /* thread_12 should now have the lock */

	rv = sys_mutex_lock(&private_mutex, K_NO_WAIT);
	zassert_equal(rv, -EBUSY, "Unexpectedly got lock on private mutex");

	rv = sys_mutex_lock(&private_mutex, K_SECONDS(1));
	zassert_equal(rv, 0, "Failed to re-obtain lock on private mutex");

	sys_mutex_unlock(&private_mutex);
	join_participant_threads();
	TC_PRINT("Recursive locking tests successful\n");
}


/* We deliberately disable userspace, even on platforms that support it, so
 * that the alternate implementation of sys_mutex (which is just a very thin
 * wrapper around k_mutex) is exercised. This requires us not to attempt to
 * start the tests in user mode, as doing so would fail an assertion in the
 * thread code.
 */
ZTEST(mutex_complex, test_supervisor_access)
{
	int rv;

#ifdef CONFIG_USERSPACE
	/* coverage for get_k_mutex checks */
	rv = sys_mutex_lock((struct sys_mutex *)NULL, K_NO_WAIT);
	zassert_true(rv == -EINVAL, "accepted bad mutex pointer");
	rv = sys_mutex_lock((struct sys_mutex *)k_current_get(), K_NO_WAIT);
	zassert_true(rv == -EINVAL, "accepted object that was not a mutex");
	rv = sys_mutex_unlock((struct sys_mutex *)NULL);
	zassert_true(rv == -EINVAL, "accepted bad mutex pointer");
	rv = sys_mutex_unlock((struct sys_mutex *)k_current_get());
	zassert_true(rv == -EINVAL, "accepted object that was not a mutex");
#endif /* CONFIG_USERSPACE */

	rv = sys_mutex_unlock(&not_my_mutex);
	zassert_true(rv == -EPERM, "unlocked a mutex we do not own");
	rv = sys_mutex_unlock(&bad_count_mutex);
	zassert_true(rv == -EINVAL, "unlocked a mutex that was never locked");
}


ZTEST_USER_OR_NOT(mutex_complex, test_user_access)
{
#ifdef CONFIG_USERSPACE
	int rv;

	rv = sys_mutex_lock(&no_access_mutex, K_NO_WAIT);
	zassert_true(rv == -EACCES, "accessed mutex not in memory domain");
	rv = sys_mutex_unlock(&no_access_mutex);
	zassert_true(rv == -EACCES, "accessed mutex not in memory domain");
#else
	ztest_test_skip();
#endif /* CONFIG_USERSPACE */
}

/* Test suite setup, run once on the main (supervisor) thread */
static void *sys_mutex_tests_setup(void)
{
	int rv;

/* We are on the main thread (supervisor thread).
 * Grant the necessary permissions to the main thread; the ztest thread
 * (user thread) will inherit them.
 */
#ifdef CONFIG_USERSPACE
	k_thread_access_grant(k_current_get(),
				&thread_05_thread_data, &thread_05_stack_area,
				&thread_06_thread_data, &thread_06_stack_area,
				&thread_07_thread_data, &thread_07_stack_area,
				&thread_08_thread_data, &thread_08_stack_area,
				&thread_09_thread_data, &thread_09_stack_area,
				&thread_11_thread_data, &thread_11_stack_area,
				&thread_12_thread_data, &thread_12_stack_area);
#endif
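	/*
	 * Lock not_my_mutex from the setup thread so that
	 * test_supervisor_access can verify that unlocking it from a
	 * different thread fails with -EPERM.
	 */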
	rv = sys_mutex_lock(&not_my_mutex, K_NO_WAIT);
	if (rv != 0) {
		TC_ERROR("Failed to take mutex %p\n", &not_my_mutex);
	}
	return NULL;
}

ZTEST_SUITE(mutex_complex, NULL, sys_mutex_tests_setup, NULL, NULL, NULL);