/*
 * Copyright (c) 2018-2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <pthread.h>
#include <semaphore.h>
#include <time.h>

#include <zephyr/sys/util.h>
#include <zephyr/ztest.h>

#define DETACH_THR_ID 2

#define N_THR_E    3
#define N_THR_T    4
#define BOUNCES    64
#define ONE_SECOND 1

/* Macros to test invalid states */
#define PTHREAD_CANCEL_INVALID -1
#define SCHED_INVALID          -1
#define PRIO_INVALID           -1
#define PTHREAD_INVALID        -1

static void *thread_top_exec(void *p1);
static void *thread_top_term(void *p1);

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cvar0 = PTHREAD_COND_INITIALIZER;
static pthread_cond_t cvar1 = PTHREAD_COND_INITIALIZER;
static pthread_barrier_t barrier;

static sem_t main_sem;

static int bounce_failed;
static int bounce_done[N_THR_E];

static int curr_bounce_thread;

static int barrier_failed;
static int barrier_done[N_THR_E];
static int barrier_return[N_THR_E];

/* First phase bounces execution between two threads using a condition
 * variable, continuously testing that no other thread is mucking with
 * the protected state.  This ends with all threads going back to
 * sleep on the condition variable and being woken by main() for the
 * second phase.
 *
 * Second phase simply lines up all the threads on a barrier, verifies
 * that none run until the last one enters, and that all run after the
 * exit.
 *
 * Test success is signaled to main() using a traditional semaphore.
 */

static void *thread_top_exec(void *p1)
{
	int i, j, id = (int)POINTER_TO_INT(p1);
	int policy;
	struct sched_param schedparam;

	pthread_getschedparam(pthread_self(), &policy, &schedparam);
	printk("Thread %d starting with scheduling policy %d & priority %d\n", id, policy,
	       schedparam.sched_priority);
	/* Try a double-lock here to exercise the failing case of
	 * trylock.  We don't support RECURSIVE locks, so this is
	 * guaranteed to fail.
	 */
	pthread_mutex_lock(&lock);

	if (!pthread_mutex_trylock(&lock)) {
		printk("pthread_mutex_trylock inexplicably succeeded\n");
		bounce_failed = 1;
	}

	pthread_mutex_unlock(&lock);

	for (i = 0; i < BOUNCES; i++) {

		pthread_mutex_lock(&lock);

		/* Wait for the current owner to signal us, unless we
		 * are the very first thread, in which case we need to
		 * wait a bit to be sure the other threads get
		 * scheduled and wait on cvar0.
		 */
		if (!(id == 0 && i == 0)) {
			zassert_equal(0, pthread_cond_wait(&cvar0, &lock), "");
		} else {
			pthread_mutex_unlock(&lock);
			usleep(USEC_PER_MSEC * 500U);
			pthread_mutex_lock(&lock);
		}

		/* Claim ownership, then try really hard to give someone
		 * else a shot at hitting this if they are racing.
		 */
		curr_bounce_thread = id;
		for (j = 0; j < 1000; j++) {
			if (curr_bounce_thread != id) {
				printk("Racing bounce threads\n");
				bounce_failed = 1;
				sem_post(&main_sem);
				pthread_mutex_unlock(&lock);
				return NULL;
			}
			sched_yield();
		}

		/* Next one's turn, go back to the top and wait.  */
		pthread_cond_signal(&cvar0);
		pthread_mutex_unlock(&lock);
	}

	/* Signal we are complete to main(), then let it wake us up.  Note
	 * that we are using the same mutex with both cvar0 and cvar1,
	 * which is non-standard but kosher per POSIX (and it works fine
	 * in our implementation).
	 */
	pthread_mutex_lock(&lock);
	bounce_done[id] = 1;
	sem_post(&main_sem);
	pthread_cond_wait(&cvar1, &lock);
	pthread_mutex_unlock(&lock);

	/* Now just wait on the barrier.  Make sure no one else finished
	 * before we wait on it, then signal that we're done
	 */
	for (i = 0; i < N_THR_E; i++) {
		if (barrier_done[i]) {
			printk("Barrier exited early\n");
			barrier_failed = 1;
			sem_post(&main_sem);
		}
	}
	barrier_return[id] = pthread_barrier_wait(&barrier);
	barrier_done[id] = 1;
	sem_post(&main_sem);
	pthread_exit(p1);

	return NULL;
}

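/* Helper thread entry point: sleep for the requested number of milliseconds, then exit. */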
static void *timedjoin_thread(void *p1)
{
	int sleep_duration_ms = POINTER_TO_INT(p1);

	usleep(USEC_PER_MSEC * sleep_duration_ms);
	return NULL;
}

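/* Return 1 once every exec thread has finished the bounce phase, or a failure was recorded. */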
static int bounce_test_done(void)
{
	int i;

	if (bounce_failed) {
		return 1;
	}

	for (i = 0; i < N_THR_E; i++) {
		if (!bounce_done[i]) {
			return 0;
		}
	}

	return 1;
}

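/* Return 1 once every exec thread has passed the barrier, or a failure was recorded. */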
static int barrier_test_done(void)
{
	int i;

	if (barrier_failed) {
		return 1;
	}

	for (i = 0; i < N_THR_E; i++) {
		if (!barrier_done[i]) {
			return 0;
		}
	}

	return 1;
}

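/* Entry point for the termination test threads: adjust priority, optionally
 * disable cancellation and detach, then cancel self (or exit after a delay
 * when cancellation is disabled).
 */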
static void *thread_top_term(void *p1)
{
	pthread_t self;
	int policy, ret;
	int id = POINTER_TO_INT(p1);
	struct sched_param param, getschedparam;

	param.sched_priority = N_THR_T - id;

	self = pthread_self();

	/* Change priority of thread */
	zassert_false(pthread_setschedparam(self, SCHED_RR, &param),
		      "Unable to set thread priority!");

	zassert_false(pthread_getschedparam(self, &policy, &getschedparam),
		      "Unable to get thread priority!");

	printk("Thread %d starting with a priority of %d\n", id, getschedparam.sched_priority);

	if (id % 2) {
		ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		zassert_false(ret, "Unable to set cancel state!");
	}

	if (id >= DETACH_THR_ID) {
		zassert_ok(pthread_detach(self), "failed to set detach state");
		zassert_equal(pthread_detach(self), EINVAL, "re-detached thread!");
	}

	printk("Cancelling thread %d\n", id);
	pthread_cancel(self);
	printk("Thread %d could not be cancelled\n", id);
	sleep(ONE_SECOND);
	pthread_exit(p1);
	return NULL;
}

/* Test the internal priority conversion functions */
int zephyr_to_posix_priority(int z_prio, int *policy);
int posix_to_zephyr_priority(int priority, int policy);
ZTEST(pthread, test_pthread_priority_conversion)
{
	/*
	 *    ZEPHYR [-CONFIG_NUM_COOP_PRIORITIES, -1]
	 *                       TO
	 * POSIX(FIFO) [0, CONFIG_NUM_COOP_PRIORITIES - 1]
	 */
	for (int z_prio = -CONFIG_NUM_COOP_PRIORITIES, prio = CONFIG_NUM_COOP_PRIORITIES - 1,
		 p_prio, policy;
	     z_prio <= -1; z_prio++, prio--) {
		p_prio = zephyr_to_posix_priority(z_prio, &policy);
		zassert_equal(policy, SCHED_FIFO);
		zassert_equal(p_prio, prio, "%d %d\n", p_prio, prio);
		zassert_equal(z_prio, posix_to_zephyr_priority(p_prio, SCHED_FIFO));
	}

	/*
	 *  ZEPHYR [0, CONFIG_NUM_PREEMPT_PRIORITIES - 1]
	 *                      TO
	 * POSIX(RR) [0, CONFIG_NUM_PREEMPT_PRIORITIES - 1]
	 */
	for (int z_prio = 0, prio = CONFIG_NUM_PREEMPT_PRIORITIES - 1, p_prio, policy;
	     z_prio < CONFIG_NUM_PREEMPT_PRIORITIES; z_prio++, prio--) {
		p_prio = zephyr_to_posix_priority(z_prio, &policy);
		zassert_equal(policy, SCHED_RR);
		zassert_equal(p_prio, prio, "%d %d\n", p_prio, prio);
		zassert_equal(z_prio, posix_to_zephyr_priority(p_prio, SCHED_RR));
	}
}

ZTEST(pthread, test_pthread_execution)
{
	int i, ret;
	pthread_t newthread[N_THR_E];
	void *retval;
	int serial_threads = 0;
	static const char thr_name[] = "thread name";
	char thr_name_buf[CONFIG_THREAD_MAX_NAME_LEN];

	/*
	 * initialize barriers the standard way after deprecating
	 * PTHREAD_BARRIER_DEFINE().
	 */
	zassert_ok(pthread_barrier_init(&barrier, NULL, N_THR_E));

	sem_init(&main_sem, 0, 1);

	/* TESTPOINT: Try getting name of NULL thread (aka uninitialized
	 * thread var).
	 */
	ret = pthread_getname_np(PTHREAD_INVALID, thr_name_buf, sizeof(thr_name_buf));
	zassert_equal(ret, ESRCH, "uninitialized getname!");

	for (i = 0; i < N_THR_E; i++) {
		ret = pthread_create(&newthread[i], NULL, thread_top_exec, INT_TO_POINTER(i));
	}

	/* TESTPOINT: Try setting name of NULL thread (aka uninitialized
	 * thread var).
	 */
	ret = pthread_setname_np(PTHREAD_INVALID, thr_name);
	zassert_equal(ret, ESRCH, "uninitialized setname!");

	/* TESTPOINT: Try getting thread name with no buffer */
	ret = pthread_getname_np(newthread[0], NULL, sizeof(thr_name_buf));
	zassert_equal(ret, EINVAL, "uninitialized getname!");

	/* TESTPOINT: Try setting thread name with no buffer */
	ret = pthread_setname_np(newthread[0], NULL);
	zassert_equal(ret, EINVAL, "uninitialized setname!");

	/* TESTPOINT: Try setting thread name */
	ret = pthread_setname_np(newthread[0], thr_name);
	zassert_false(ret, "Set thread name failed!");

	/* TESTPOINT: Try getting thread name */
	ret = pthread_getname_np(newthread[0], thr_name_buf, sizeof(thr_name_buf));
	zassert_false(ret, "Get thread name failed!");

	/* TESTPOINT: Thread names match */
	ret = strncmp(thr_name, thr_name_buf, MIN(strlen(thr_name), strlen(thr_name_buf)));
	zassert_false(ret, "Thread names don't match!");

	while (!bounce_test_done()) {
		sem_wait(&main_sem);
	}

	/* TESTPOINT: Check if bounce test passes */
	zassert_false(bounce_failed, "Bounce test failed");

	printk("Bounce test OK\n");

	/* Wake up the worker threads */
	pthread_mutex_lock(&lock);
	pthread_cond_broadcast(&cvar1);
	pthread_mutex_unlock(&lock);

	while (!barrier_test_done()) {
		sem_wait(&main_sem);
	}

	/* TESTPOINT: Check if barrier test passes */
	zassert_false(barrier_failed, "Barrier test failed");

	for (i = 0; i < N_THR_E; i++) {
		pthread_join(newthread[i], &retval);
	}

	for (i = 0; i < N_THR_E; i++) {
		if (barrier_return[i] == PTHREAD_BARRIER_SERIAL_THREAD) {
			++serial_threads;
		}
	}

	/* TESTPOINT: Check only one PTHREAD_BARRIER_SERIAL_THREAD returned. */
	zassert_true(serial_threads == 1, "Bungled barrier return value(s)");

	printk("Barrier test OK\n");
}

ZTEST(pthread, test_pthread_termination)
{
	int32_t i, ret;
	pthread_t newthread[N_THR_T] = {0};
	void *retval;

	/* Creating 4 threads */
	for (i = 0; i < N_THR_T; i++) {
		zassert_ok(pthread_create(&newthread[i], NULL, thread_top_term, INT_TO_POINTER(i)));
	}

	/* TESTPOINT: Try setting invalid cancel state to current thread */
	ret = pthread_setcancelstate(PTHREAD_CANCEL_INVALID, NULL);
	zassert_equal(ret, EINVAL, "invalid cancel state set!");

	for (i = 0; i < N_THR_T; i++) {
		if (i < DETACH_THR_ID) {
			zassert_ok(pthread_join(newthread[i], &retval));
		}
	}

	/* TESTPOINT: Test for deadlock */
	ret = pthread_join(pthread_self(), &retval);
	zassert_equal(ret, EDEADLK, "thread joined with self inexplicably!");

	/* TESTPOINT: Try canceling a terminated thread */
	ret = pthread_cancel(newthread[0]);
	zassert_equal(ret, ESRCH, "cancelled a terminated thread!");
}

ZTEST(pthread, test_pthread_tryjoin)
{
	pthread_t th = {0};
	int sleep_duration_ms = 200;
	void *retval;

	/* Creating a thread that exits after 200ms */
	zassert_ok(pthread_create(&th, NULL, timedjoin_thread, INT_TO_POINTER(sleep_duration_ms)));

	/* Attempting to join, when thread is still running, should fail */
	usleep(USEC_PER_MSEC * sleep_duration_ms / 2);
	zassert_equal(pthread_tryjoin_np(th, &retval), EBUSY);

	/* Sleep so thread will exit */
	usleep(USEC_PER_MSEC * sleep_duration_ms);

	/* Attempting to join without blocking should succeed now */
	zassert_ok(pthread_tryjoin_np(th, &retval));
}

ZTEST(pthread, test_pthread_timedjoin)
{
	pthread_t th = {0};
	int sleep_duration_ms = 200;
	void *ret;
	struct timespec not_done;
	struct timespec done;
	struct timespec invalid[] = {
		{.tv_nsec = -1},
		{.tv_nsec = NSEC_PER_SEC},
	};

	/* Set up timespecs for when the thread is still running and for when it is done */
	clock_gettime(CLOCK_REALTIME, &not_done);
	clock_gettime(CLOCK_REALTIME, &done);
	not_done.tv_nsec += sleep_duration_ms / 2 * NSEC_PER_MSEC;
	done.tv_nsec += sleep_duration_ms * 1.5 * NSEC_PER_MSEC;
	while (not_done.tv_nsec >= NSEC_PER_SEC) {
		not_done.tv_sec++;
		not_done.tv_nsec -= NSEC_PER_SEC;
	}
	while (done.tv_nsec >= NSEC_PER_SEC) {
		done.tv_sec++;
		done.tv_nsec -= NSEC_PER_SEC;
	}

	/* Creating a thread that exits after 200ms */
	zassert_ok(pthread_create(&th, NULL, timedjoin_thread, INT_TO_POINTER(sleep_duration_ms)));

	/* pthread_timedjoin_np must return EINVAL for invalid struct timespecs */
	zassert_equal(pthread_timedjoin_np(th, &ret, NULL), EINVAL);
	for (size_t i = 0; i < ARRAY_SIZE(invalid); ++i) {
		zassert_equal(pthread_timedjoin_np(th, &ret, &invalid[i]), EINVAL);
	}

	/* Attempting to join with a timeout, when the thread is still running, should fail */
	zassert_equal(pthread_timedjoin_np(th, &ret, &not_done), ETIMEDOUT);

	/* Attempting to join with a timeout, when the thread is done, should succeed */
	zassert_ok(pthread_timedjoin_np(th, &ret, &done));
}

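/* Trivial thread entry point used to exercise pthread descriptor reuse. */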
static void *create_thread1(void *p1)
{
	/* do nothing */
	return NULL;
}

ZTEST(pthread, test_pthread_descriptor_leak)
{
	pthread_t pthread1;

	/* If we are leaking descriptors, then this loop will never complete */
	for (size_t i = 0; i < CONFIG_POSIX_THREAD_THREADS_MAX * 2; ++i) {
		zassert_ok(pthread_create(&pthread1, NULL, create_thread1, NULL),
			   "unable to create thread %zu", i);
		zassert_ok(pthread_join(pthread1, NULL), "unable to join thread %zu", i);
	}
}

ZTEST(pthread, test_pthread_equal)
{
	zassert_true(pthread_equal(pthread_self(), pthread_self()));
	zassert_false(pthread_equal(pthread_self(), (pthread_t)4242));
}

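/* Cleanup handler that records its execution through the boolean flag it is passed. */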
static void cleanup_handler(void *arg)
{
	bool *boolp = (bool *)arg;

	*boolp = true;
}

static void *test_pthread_cleanup_entry(void *arg)
{
	bool executed[2] = {0};

	pthread_cleanup_push(cleanup_handler, &executed[0]);
	pthread_cleanup_push(cleanup_handler, &executed[1]);
	pthread_cleanup_pop(false);
	pthread_cleanup_pop(true);

	zassert_true(executed[0]);
	zassert_false(executed[1]);

	return NULL;
}

ZTEST(pthread, test_pthread_cleanup)
{
	pthread_t th;

	zassert_ok(pthread_create(&th, NULL, test_pthread_cleanup_entry, NULL));
	zassert_ok(pthread_join(th, NULL));
}

static bool testcancel_ignored;
static bool testcancel_failed;

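/* Verify that pthread_testcancel() is a no-op while cancellation is disabled
 * and terminates the thread once a pending cancel is enabled.
 */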
static void *test_pthread_cancel_fn(void *arg)
{
	zassert_ok(pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL));

	testcancel_ignored = false;

	/* this should be ignored */
	pthread_testcancel();

	testcancel_ignored = true;

	/* this will mark it pending */
	zassert_ok(pthread_cancel(pthread_self()));

	/* enable the thread to be cancelled */
	zassert_ok(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL));

	testcancel_failed = false;

	/* this should terminate the thread */
	pthread_testcancel();

	testcancel_failed = true;

	return NULL;
}

ZTEST(pthread, test_pthread_testcancel)
{
	pthread_t th;

	zassert_ok(pthread_create(&th, NULL, test_pthread_cancel_fn, NULL));
	zassert_ok(pthread_join(th, NULL));
	zassert_true(testcancel_ignored);
	zassert_false(testcancel_failed);
}

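/* Exercise pthread_setschedprio(): an invalid priority and an invalid thread are
 * rejected, and a valid priority change is reflected by pthread_getschedparam().
 */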
static void *test_pthread_setschedprio_fn(void *arg)
{
	int policy;
	int prio = 0;
	struct sched_param param;
	pthread_t self = pthread_self();

	zassert_equal(pthread_setschedprio(self, PRIO_INVALID), EINVAL, "EINVAL was expected");
	zassert_equal(pthread_setschedprio(PTHREAD_INVALID, prio), ESRCH, "ESRCH was expected");

	zassert_ok(pthread_setschedprio(self, prio));
	param.sched_priority = ~prio;
	zassert_ok(pthread_getschedparam(self, &policy, &param));
	zassert_equal(param.sched_priority, prio, "Priority unchanged");

	return NULL;
}

ZTEST(pthread, test_pthread_setschedprio)
{
	pthread_t th;

	zassert_ok(pthread_create(&th, NULL, test_pthread_setschedprio_fn, NULL));
	zassert_ok(pthread_join(th, NULL));
}

static void before(void *arg)
{
	ARG_UNUSED(arg);

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD)) {
		/* skip redundant testing if there is no thread pool / heap allocation */
		ztest_test_skip();
	}
}

ZTEST_SUITE(pthread, NULL, NULL, before, NULL, NULL);