/*
 * Copyright (c) 2018-2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <pthread.h>
#include <semaphore.h>

#include <zephyr/sys/util.h>
#include <zephyr/ztest.h>

#define DETACH_THR_ID 2

#define N_THR_E 3
#define N_THR_T 4
#define BOUNCES 64
#define ONE_SECOND 1

/* Macros to test invalid states */
#define PTHREAD_CANCEL_INVALID -1
#define SCHED_INVALID -1
#define PRIO_INVALID -1
#define PTHREAD_INVALID -1

static void *thread_top_exec(void *p1);
static void *thread_top_term(void *p1);

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cvar0 = PTHREAD_COND_INITIALIZER;
static pthread_cond_t cvar1 = PTHREAD_COND_INITIALIZER;
static pthread_barrier_t barrier;

static sem_t main_sem;

static int bounce_failed;
static int bounce_done[N_THR_E];

static int curr_bounce_thread;

static int barrier_failed;
static int barrier_done[N_THR_E];
static int barrier_return[N_THR_E];

/* First phase bounces execution between two threads using a condition
 * variable, continuously testing that no other thread is mucking with
 * the protected state.  This ends with all threads going back to
 * sleep on the condition variable and being woken by main() for the
 * second phase.
 *
 * Second phase simply lines up all the threads on a barrier, verifies
 * that none run until the last one enters, and that all run after the
 * exit.
 *
 * Test success is signaled to main() using a traditional semaphore.
 */

static void *thread_top_exec(void *p1)
{
	int i, j, id = (int) POINTER_TO_INT(p1);
	int policy;
	struct sched_param schedparam;

	pthread_getschedparam(pthread_self(), &policy, &schedparam);
	printk("Thread %d starting with scheduling policy %d & priority %d\n",
		 id, policy, schedparam.sched_priority);
	/* Try a double-lock here to exercise the failing case of
	 * trylock.  We don't support RECURSIVE locks, so this is
	 * guaranteed to fail.
	 */
	pthread_mutex_lock(&lock);

	if (!pthread_mutex_trylock(&lock)) {
		printk("pthread_mutex_trylock inexplicably succeeded\n");
		bounce_failed = 1;
	}

	pthread_mutex_unlock(&lock);

	for (i = 0; i < BOUNCES; i++) {

		pthread_mutex_lock(&lock);

		/* Wait for the current owner to signal us, unless we
		 * are the very first thread, in which case we need to
		 * wait a bit to be sure the other threads get
		 * scheduled and wait on cvar0.
		 */
		if (!(id == 0 && i == 0)) {
			zassert_equal(0, pthread_cond_wait(&cvar0, &lock), "");
		} else {
			pthread_mutex_unlock(&lock);
			usleep(USEC_PER_MSEC * 500U);
			pthread_mutex_lock(&lock);
		}

		/* Claim ownership, then try really hard to give someone
		 * else a shot at hitting this if they are racing.
		 */
		curr_bounce_thread = id;
		for (j = 0; j < 1000; j++) {
			if (curr_bounce_thread != id) {
				printk("Racing bounce threads\n");
				bounce_failed = 1;
				sem_post(&main_sem);
				pthread_mutex_unlock(&lock);
				return NULL;
			}
			sched_yield();
		}

		/* Next one's turn, go back to the top and wait.  */
		pthread_cond_signal(&cvar0);
		pthread_mutex_unlock(&lock);
	}

	/* Signal we are complete to main(), then let it wake us up.  Note
	 * that we are using the same mutex with both cvar0 and cvar1,
	 * which is non-standard but kosher per POSIX (and it works fine
	 * in our implementation).
	 */
	pthread_mutex_lock(&lock);
	bounce_done[id] = 1;
	sem_post(&main_sem);
	pthread_cond_wait(&cvar1, &lock);
	pthread_mutex_unlock(&lock);

	/* Now just wait on the barrier.  Make sure no one else finished
	 * before we wait on it, then signal that we're done
	 */
	for (i = 0; i < N_THR_E; i++) {
		if (barrier_done[i]) {
			printk("Barrier exited early\n");
			barrier_failed = 1;
			sem_post(&main_sem);
		}
	}
	barrier_return[id] = pthread_barrier_wait(&barrier);
	barrier_done[id] = 1;
	sem_post(&main_sem);
	pthread_exit(p1);

	return NULL;
}

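/* Entry point for the tryjoin/timedjoin tests: sleep for the requested
 * number of milliseconds, then exit.
 */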
static void *timedjoin_thread(void *p1)
{
	int sleep_duration_ms = POINTER_TO_INT(p1);

	usleep(USEC_PER_MSEC * sleep_duration_ms);
	return NULL;
}

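/* The bounce phase is over once it has failed or every thread has set its
 * bounce_done[] flag.
 */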
static int bounce_test_done(void)
{
	int i;

	if (bounce_failed) {
		return 1;
	}

	for (i = 0; i < N_THR_E; i++) {
		if (!bounce_done[i]) {
			return 0;
		}
	}

	return 1;
}

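/* The barrier phase is over once it has failed or every thread has passed
 * the barrier and set its barrier_done[] flag.
 */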
static int barrier_test_done(void)
{
	int i;

	if (barrier_failed) {
		return 1;
	}

	for (i = 0; i < N_THR_E; i++) {
		if (!barrier_done[i]) {
			return 0;
		}
	}

	return 1;
}

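/* Entry point for the termination test: set the thread priority, disable
 * cancellation on odd-numbered threads, detach threads at or above
 * DETACH_THR_ID, then cancel ourselves.
 */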
static void *thread_top_term(void *p1)
{
	pthread_t self;
	int policy, ret;
	int id = POINTER_TO_INT(p1);
	struct sched_param param, getschedparam;

	param.sched_priority = N_THR_T - id;

	self = pthread_self();

	/* Change priority of thread */
	zassert_false(pthread_setschedparam(self, SCHED_RR, &param),
		      "Unable to set thread priority!");

	zassert_false(pthread_getschedparam(self, &policy, &getschedparam),
			"Unable to get thread priority!");

	printk("Thread %d starting with a priority of %d\n",
			id,
			getschedparam.sched_priority);

	if (id % 2) {
		ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		zassert_false(ret, "Unable to set cancel state!");
	}

	if (id >= DETACH_THR_ID) {
		zassert_ok(pthread_detach(self), "failed to set detach state");
		zassert_equal(pthread_detach(self), EINVAL, "re-detached thread!");
	}

	printk("Cancelling thread %d\n", id);
	pthread_cancel(self);
	printk("Thread %d could not be cancelled\n", id);
	sleep(ONE_SECOND);
	pthread_exit(p1);
	return NULL;
}

/* Test the internal priority conversion functions */
int zephyr_to_posix_priority(int z_prio, int *policy);
int posix_to_zephyr_priority(int priority, int policy);
ZTEST(pthread, test_pthread_priority_conversion)
{
	/*
	 *    ZEPHYR [-CONFIG_NUM_COOP_PRIORITIES, -1]
	 *                       TO
	 * POSIX(FIFO) [0, CONFIG_NUM_COOP_PRIORITIES - 1]
	 */
	for (int z_prio = -CONFIG_NUM_COOP_PRIORITIES, prio = CONFIG_NUM_COOP_PRIORITIES - 1,
		 p_prio, policy;
	     z_prio <= -1; z_prio++, prio--) {
		p_prio = zephyr_to_posix_priority(z_prio, &policy);
		zassert_equal(policy, SCHED_FIFO);
		zassert_equal(p_prio, prio, "%d %d\n", p_prio, prio);
		zassert_equal(z_prio, posix_to_zephyr_priority(p_prio, SCHED_FIFO));
	}

	/*
	 *  ZEPHYR [0, CONFIG_NUM_PREEMPT_PRIORITIES - 1]
	 *                      TO
	 * POSIX(RR) [0, CONFIG_NUM_PREEMPT_PRIORITIES - 1]
	 */
	for (int z_prio = 0, prio = CONFIG_NUM_PREEMPT_PRIORITIES - 1, p_prio, policy;
	     z_prio < CONFIG_NUM_PREEMPT_PRIORITIES; z_prio++, prio--) {
		p_prio = zephyr_to_posix_priority(z_prio, &policy);
		zassert_equal(policy, SCHED_RR);
		zassert_equal(p_prio, prio, "%d %d\n", p_prio, prio);
		zassert_equal(z_prio, posix_to_zephyr_priority(p_prio, SCHED_RR));
	}
}

ZTEST(pthread, test_pthread_execution)
{
	int i, ret;
	pthread_t newthread[N_THR_E];
	void *retval;
	int serial_threads = 0;
	static const char thr_name[] = "thread name";
	char thr_name_buf[CONFIG_THREAD_MAX_NAME_LEN];

	/*
	 * initialize barriers the standard way after deprecating
	 * PTHREAD_BARRIER_DEFINE().
	 */
	zassert_ok(pthread_barrier_init(&barrier, NULL, N_THR_E));

	sem_init(&main_sem, 0, 1);

	/* TESTPOINT: Try getting name of NULL thread (aka uninitialized
	 * thread var).
	 */
	ret = pthread_getname_np(PTHREAD_INVALID, thr_name_buf, sizeof(thr_name_buf));
	zassert_equal(ret, ESRCH, "uninitialized getname!");

	for (i = 0; i < N_THR_E; i++) {
		ret = pthread_create(&newthread[i], NULL, thread_top_exec, INT_TO_POINTER(i));
	}

	/* TESTPOINT: Try setting name of NULL thread (aka uninitialized
	 * thread var).
	 */
	ret = pthread_setname_np(PTHREAD_INVALID, thr_name);
	zassert_equal(ret, ESRCH, "uninitialized setname!");

	/* TESTPOINT: Try getting thread name with no buffer */
	ret = pthread_getname_np(newthread[0], NULL, sizeof(thr_name_buf));
	zassert_equal(ret, EINVAL, "uninitialized getname!");

	/* TESTPOINT: Try setting thread name with no buffer */
	ret = pthread_setname_np(newthread[0], NULL);
	zassert_equal(ret, EINVAL, "uninitialized setname!");

	/* TESTPOINT: Try setting thread name */
	ret = pthread_setname_np(newthread[0], thr_name);
	zassert_false(ret, "Set thread name failed!");

	/* TESTPOINT: Try getting thread name */
	ret = pthread_getname_np(newthread[0], thr_name_buf,
				 sizeof(thr_name_buf));
	zassert_false(ret, "Get thread name failed!");

	/* TESTPOINT: Thread names match */
	ret = strncmp(thr_name, thr_name_buf, MIN(strlen(thr_name), strlen(thr_name_buf)));
	zassert_false(ret, "Thread names don't match!");

	while (!bounce_test_done()) {
		sem_wait(&main_sem);
	}

	/* TESTPOINT: Check if bounce test passes */
	zassert_false(bounce_failed, "Bounce test failed");

	printk("Bounce test OK\n");

	/* Wake up the worker threads */
	pthread_mutex_lock(&lock);
	pthread_cond_broadcast(&cvar1);
	pthread_mutex_unlock(&lock);

	while (!barrier_test_done()) {
		sem_wait(&main_sem);
	}

	/* TESTPOINT: Check if barrier test passes */
	zassert_false(barrier_failed, "Barrier test failed");

	for (i = 0; i < N_THR_E; i++) {
		pthread_join(newthread[i], &retval);
	}

	for (i = 0; i < N_THR_E; i++) {
		if (barrier_return[i] == PTHREAD_BARRIER_SERIAL_THREAD) {
			++serial_threads;
		}
	}

	/* TESTPOINT: Check only one PTHREAD_BARRIER_SERIAL_THREAD returned. */
	zassert_true(serial_threads == 1, "Bungled barrier return value(s)");

	printk("Barrier test OK\n");
}

ZTEST(pthread, test_pthread_termination)
{
	int32_t i, ret;
	pthread_t newthread[N_THR_T] = {0};
	void *retval;

	/* Creating 4 threads */
	for (i = 0; i < N_THR_T; i++) {
		zassert_ok(pthread_create(&newthread[i], NULL, thread_top_term, INT_TO_POINTER(i)));
	}

	/* TESTPOINT: Try setting invalid cancel state to current thread */
	ret = pthread_setcancelstate(PTHREAD_CANCEL_INVALID, NULL);
	zassert_equal(ret, EINVAL, "invalid cancel state set!");

	for (i = 0; i < N_THR_T; i++) {
		if (i < DETACH_THR_ID) {
			zassert_ok(pthread_join(newthread[i], &retval));
		}
	}

	/* TESTPOINT: Test for deadlock */
	ret = pthread_join(pthread_self(), &retval);
	zassert_equal(ret, EDEADLK, "thread joined with self inexplicably!");

	/* TESTPOINT: Try canceling a terminated thread */
	ret = pthread_cancel(newthread[0]);
	zassert_equal(ret, ESRCH, "cancelled a terminated thread!");
}

ZTEST(pthread, test_pthread_tryjoin)
{
	pthread_t th = {0};
	int sleep_duration_ms = 200;
	void *retval;

	/* Create a thread that exits after 200 ms */
	zassert_ok(pthread_create(&th, NULL, timedjoin_thread, INT_TO_POINTER(sleep_duration_ms)));

	/* Attempting to join while the thread is still running should fail */
	usleep(USEC_PER_MSEC * sleep_duration_ms / 2);
	zassert_equal(pthread_tryjoin_np(th, &retval), EBUSY);

	/* Sleep so the thread will exit */
	usleep(USEC_PER_MSEC * sleep_duration_ms);

	/* Attempting to join without blocking should succeed now */
	zassert_ok(pthread_tryjoin_np(th, &retval));
}

ZTEST(pthread, test_pthread_timedjoin)
{
	pthread_t th = {0};
	int sleep_duration_ms = 200;
	void *ret;
	struct timespec not_done;
	struct timespec done;
	struct timespec invalid[] = {
		[0] = {.tv_sec = -1},
		[1] = {.tv_nsec = -1},
		[2] = {.tv_nsec = NSEC_PER_SEC},
	};

	/* set up timespecs for when the thread is still running and for when it is done */
	clock_gettime(CLOCK_MONOTONIC, &not_done);
	clock_gettime(CLOCK_MONOTONIC, &done);
	not_done.tv_nsec += sleep_duration_ms / 2 * NSEC_PER_MSEC;
	done.tv_nsec += sleep_duration_ms * 1.5 * NSEC_PER_MSEC;
	while (not_done.tv_nsec >= NSEC_PER_SEC) {
		not_done.tv_sec++;
		not_done.tv_nsec -= NSEC_PER_SEC;
	}
	while (done.tv_nsec >= NSEC_PER_SEC) {
		done.tv_sec++;
		done.tv_nsec -= NSEC_PER_SEC;
	}

	/* Create a thread that exits after 200 ms */
	zassert_ok(pthread_create(&th, NULL, timedjoin_thread, INT_TO_POINTER(sleep_duration_ms)));

	/* pthread_timedjoin_np() must return EINVAL for an invalid struct timespec */
	zassert_equal(pthread_timedjoin_np(th, &ret, NULL), EINVAL);
	for (size_t i = 0; i < ARRAY_SIZE(invalid); ++i) {
		zassert_equal(pthread_timedjoin_np(th, &ret, &invalid[i]), EINVAL);
	}

	/* Attempting to join with a timeout while the thread is still running should time out */
	zassert_equal(pthread_timedjoin_np(th, &ret, &not_done), ETIMEDOUT);

	/* Attempting to join with a timeout when the thread is done should succeed */
	zassert_ok(pthread_timedjoin_np(th, &ret, &done));
}

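/* Trivial entry point used by the descriptor leak test below */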
static void *create_thread1(void *p1)
{
	/* do nothing */
	return NULL;
}

ZTEST(pthread, test_pthread_descriptor_leak)
{
	pthread_t pthread1;

	/* If we are leaking descriptors, then this loop will never complete */
	for (size_t i = 0; i < CONFIG_POSIX_THREAD_THREADS_MAX * 2; ++i) {
		zassert_ok(pthread_create(&pthread1, NULL, create_thread1, NULL),
			   "unable to create thread %zu", i);
		zassert_ok(pthread_join(pthread1, NULL), "unable to join thread %zu", i);
	}
}

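/* The process-scheduling sched_*() calls below are unsupported and are
 * expected to fail with ENOSYS.
 */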
ZTEST(pthread, test_sched_getparam)
{
	struct sched_param param;
	int rc = sched_getparam(0, &param);
	int err = errno;

	zassert_true((rc == -1 && err == ENOSYS));
}

ZTEST(pthread, test_sched_getscheduler)
{
	int rc = sched_getscheduler(0);
	int err = errno;

	zassert_true((rc == -1 && err == ENOSYS));
}

ZTEST(pthread, test_sched_setparam)
{
	struct sched_param param = {
		.sched_priority = 2,
	};
	int rc = sched_setparam(0, &param);
	int err = errno;

	zassert_true((rc == -1 && err == ENOSYS));
}

ZTEST(pthread, test_sched_setscheduler)
{
	struct sched_param param = {
		.sched_priority = 2,
	};
	int policy = 0;
	int rc = sched_setscheduler(0, policy, &param);
	int err = errno;

	zassert_true((rc == -1 && err == ENOSYS));
}

ZTEST(pthread, test_sched_rr_get_interval)
{
	struct timespec interval = {
		.tv_sec = 0,
		.tv_nsec = 0,
	};
	int rc = sched_rr_get_interval(0, &interval);
	int err = errno;

	zassert_true((rc == -1 && err == ENOSYS));
}

ZTEST(pthread, test_pthread_equal)
{
	zassert_true(pthread_equal(pthread_self(), pthread_self()));
	zassert_false(pthread_equal(pthread_self(), (pthread_t)4242));
}

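/* Cleanup handler pushed by test_pthread_cleanup_entry(); records that it ran */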
static void cleanup_handler(void *arg)
{
	bool *boolp = (bool *)arg;

	*boolp = true;
}

static void *test_pthread_cleanup_entry(void *arg)
{
	bool executed[2] = {0};

	pthread_cleanup_push(cleanup_handler, &executed[0]);
	pthread_cleanup_push(cleanup_handler, &executed[1]);
	pthread_cleanup_pop(false);
	pthread_cleanup_pop(true);

	zassert_true(executed[0]);
	zassert_false(executed[1]);

	return NULL;
}

ZTEST(pthread, test_pthread_cleanup)
{
	pthread_t th;

	zassert_ok(pthread_create(&th, NULL, test_pthread_cleanup_entry, NULL));
	zassert_ok(pthread_join(th, NULL));
}

static bool testcancel_ignored;
static bool testcancel_failed;

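/* Verify that pthread_testcancel() is a no-op while cancellation is disabled
 * and that it terminates the thread once a pending cancel is re-enabled.
 */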
static void *test_pthread_cancel_fn(void *arg)
{
	zassert_ok(pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL));

	testcancel_ignored = false;

	/* this should be ignored */
	pthread_testcancel();

	testcancel_ignored = true;

	/* this will mark it pending */
	zassert_ok(pthread_cancel(pthread_self()));

	/* enable the thread to be cancelled */
	zassert_ok(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL));

	testcancel_failed = false;

	/* this should terminate the thread */
	pthread_testcancel();

	testcancel_failed = true;

	return NULL;
}

ZTEST(pthread, test_pthread_testcancel)
{
	pthread_t th;

	zassert_ok(pthread_create(&th, NULL, test_pthread_cancel_fn, NULL));
	zassert_ok(pthread_join(th, NULL));
	zassert_true(testcancel_ignored);
	zassert_false(testcancel_failed);
}

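/* Exercise the pthread_setschedprio() error cases, then set a valid priority
 * and confirm it with pthread_getschedparam().
 */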
static void *test_pthread_setschedprio_fn(void *arg)
{
	int policy;
	int prio = 0;
	struct sched_param param;
	pthread_t self = pthread_self();

	zassert_equal(pthread_setschedprio(self, PRIO_INVALID), EINVAL, "EINVAL was expected");
	zassert_equal(pthread_setschedprio(PTHREAD_INVALID, prio), ESRCH, "ESRCH was expected");

	zassert_ok(pthread_setschedprio(self, prio));
	param.sched_priority = ~prio;
	zassert_ok(pthread_getschedparam(self, &policy, &param));
	zassert_equal(param.sched_priority, prio, "Priority unchanged");

	return NULL;
}

ZTEST(pthread, test_pthread_setschedprio)
{
	pthread_t th;

	zassert_ok(pthread_create(&th, NULL, test_pthread_setschedprio_fn, NULL));
	zassert_ok(pthread_join(th, NULL));
}

static void before(void *arg)
{
	ARG_UNUSED(arg);

	if (!IS_ENABLED(CONFIG_DYNAMIC_THREAD)) {
		/* skip redundant testing if there is no thread pool / heap allocation */
		ztest_test_skip();
	}
}

ZTEST_SUITE(pthread, NULL, NULL, before, NULL, NULL);