1 /*
2  * Copyright (c) 2018 Intel Corporation
3  * Copyright (c) 2023 Meta
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include "posix_internal.h"
9 #include "pthread_sched.h"
10 
11 #include <stdio.h>
12 
13 #include <zephyr/init.h>
14 #include <zephyr/kernel.h>
15 #include <zephyr/logging/log.h>
16 #include <zephyr/sys/atomic.h>
17 #include <zephyr/posix/pthread.h>
18 #include <zephyr/posix/unistd.h>
19 #include <zephyr/sys/sem.h>
20 #include <zephyr/sys/slist.h>
21 #include <zephyr/sys/util.h>
22 
/* Map a Zephyr thread priority to a POSIX priority. Negative (cooperative)
 * Zephyr priorities map to the SCHED_FIFO range; non-negative (preemptible)
 * priorities map to the preemptible range. Inverse of
 * POSIX_TO_ZEPHYR_PRIORITY() below.
 */
#define ZEPHYR_TO_POSIX_PRIORITY(_zprio)                                                           \
	(((_zprio) < 0) ? (-1 * ((_zprio) + 1)) : (CONFIG_NUM_PREEMPT_PRIORITIES - (_zprio)-1))

/* Map a POSIX priority + policy back to a Zephyr priority. SCHED_FIFO uses
 * the cooperative (negative) range; everything else uses the preemptible
 * range.
 */
#define POSIX_TO_ZEPHYR_PRIORITY(_prio, _pol)                                                      \
	(((_pol) == SCHED_FIFO) ? (-1 * ((_prio) + 1))                                             \
				: (CONFIG_NUM_PREEMPT_PRIORITIES - (_prio)-1))

/* Defaults used when no attr (or an uninitialized attr) is supplied */
#define DEFAULT_PTHREAD_PRIORITY                                                                   \
	POSIX_TO_ZEPHYR_PRIORITY(K_LOWEST_APPLICATION_THREAD_PRIO, DEFAULT_PTHREAD_POLICY)
#define DEFAULT_PTHREAD_POLICY (IS_ENABLED(CONFIG_PREEMPT_ENABLED) ? SCHED_RR : SCHED_FIFO)

/* Upper bounds implied by the bitfield widths in struct posix_thread_attr */
#define PTHREAD_STACK_MAX BIT(CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS)
#define PTHREAD_GUARD_MAX BIT_MASK(CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_BITS)
36 
37 LOG_MODULE_REGISTER(pthread, CONFIG_PTHREAD_LOG_LEVEL);
38 
39 #ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
40 #define DYNAMIC_STACK_SIZE CONFIG_DYNAMIC_THREAD_STACK_SIZE
41 #else
42 #define DYNAMIC_STACK_SIZE 0
43 #endif
44 
/* Decode the stack size stored in @p attr. The size is stored biased by -1
 * (see __set_attr_stacksize()) so that the full bitfield range maps onto
 * [1, PTHREAD_STACK_MAX].
 */
static inline size_t __get_attr_stacksize(const struct posix_thread_attr *attr)
{
	return attr->stacksize + 1;
}
49 
/* Encode @p stacksize into @p attr's bitfield, biased by -1 so that sizes up
 * to PTHREAD_STACK_MAX fit in CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS bits.
 * Callers must ensure stacksize >= 1.
 */
static inline void __set_attr_stacksize(struct posix_thread_attr *attr, size_t stacksize)
{
	attr->stacksize = stacksize - 1;
}
54 
/* One node of a thread's cancellation-cleanup stack; the storage lives in the
 * caller's stack frame (provided by the pthread_cleanup_push() macro).
 */
struct __pthread_cleanup {
	void (*routine)(void *arg); /* handler run on pop/cancellation */
	void *arg;                  /* argument passed to routine */
	sys_snode_t node;           /* link in posix_thread::cleanup_list */
};
60 
/* Lifecycle state of a pooled pthread; doubles as an index into
 * posix_thread_q[] (except POSIX_THREAD_INVALID_Q, which has no queue).
 */
enum posix_thread_qid {
	/* ready to be started via pthread_create() */
	POSIX_THREAD_READY_Q,
	/* running */
	POSIX_THREAD_RUN_Q,
	/* exited (either joinable or detached) */
	POSIX_THREAD_DONE_Q,
	/* invalid */
	POSIX_THREAD_INVALID_Q,
};
71 
/* only 2 bits in struct posix_thread_attr for schedpolicy */
BUILD_ASSERT(SCHED_OTHER < BIT(2) && SCHED_FIFO < BIT(2) && SCHED_RR < BIT(2));

/* detachstate is stored in a 1-bit field, so the two values must be 0 and 1 */
BUILD_ASSERT((PTHREAD_CREATE_DETACHED == 0 || PTHREAD_CREATE_JOINABLE == 0) &&
	     (PTHREAD_CREATE_DETACHED == 1 || PTHREAD_CREATE_JOINABLE == 1));

/* cancelstate is stored in a 1-bit field, so the two values must be 0 and 1 */
BUILD_ASSERT((PTHREAD_CANCEL_ENABLE == 0 || PTHREAD_CANCEL_DISABLE == 0) &&
	     (PTHREAD_CANCEL_ENABLE == 1 || PTHREAD_CANCEL_DISABLE == 1));

/* stacksize and guardsize bitfields must fit together in a 32-bit word */
BUILD_ASSERT(CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS + CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_BITS <=
	     32);

int64_t timespec_to_timeoutms(const struct timespec *abstime);
static void posix_thread_recycle(void);
/* per-state thread queues, indexed by enum posix_thread_qid */
static sys_dlist_t posix_thread_q[] = {
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_READY_Q]),
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_RUN_Q]),
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_DONE_Q]),
};
/* statically-allocated pool of all pthreads */
static struct posix_thread posix_thread_pool[CONFIG_MAX_PTHREAD_COUNT];
/* guards the pool, the queues, and related per-thread bookkeeping */
static SYS_SEM_DEFINE(pthread_pool_lock, 1, 1);
/* advisory value from pthread_setconcurrency(); not used for scheduling */
static int pthread_concurrency;
94 
posix_thread_q_set(struct posix_thread * t,enum posix_thread_qid qid)95 static inline void posix_thread_q_set(struct posix_thread *t, enum posix_thread_qid qid)
96 {
97 	switch (qid) {
98 	case POSIX_THREAD_READY_Q:
99 	case POSIX_THREAD_RUN_Q:
100 	case POSIX_THREAD_DONE_Q:
101 		sys_dlist_append(&posix_thread_q[qid], &t->q_node);
102 		t->qid = qid;
103 		break;
104 	default:
105 		__ASSERT(false, "cannot set invalid qid %d for posix thread %p", qid, t);
106 		break;
107 	}
108 }
109 
posix_thread_q_get(struct posix_thread * t)110 static inline enum posix_thread_qid posix_thread_q_get(struct posix_thread *t)
111 {
112 	switch (t->qid) {
113 	case POSIX_THREAD_READY_Q:
114 	case POSIX_THREAD_RUN_Q:
115 	case POSIX_THREAD_DONE_Q:
116 		return t->qid;
117 	default:
118 		__ASSERT(false, "posix thread %p has invalid qid: %d", t, t->qid);
119 		return POSIX_THREAD_INVALID_Q;
120 	}
121 }
122 
123 /*
124  * We reserve the MSB to mark a pthread_t as initialized (from the
125  * perspective of the application). With a linear space, this means that
126  * the theoretical pthread_t range is [0,2147483647].
127  */
128 BUILD_ASSERT(CONFIG_POSIX_THREAD_THREADS_MAX < PTHREAD_OBJ_MASK_INIT,
129 	     "CONFIG_POSIX_THREAD_THREADS_MAX is too high");
130 
posix_thread_to_offset(struct posix_thread * t)131 static inline size_t posix_thread_to_offset(struct posix_thread *t)
132 {
133 	return t - posix_thread_pool;
134 }
135 
/* Extract the pool index from a pthread_t handle by clearing the
 * "initialized" marker bit (see comment above).
 */
static inline size_t get_posix_thread_idx(pthread_t pth)
{
	return mark_pthread_obj_uninitialized(pth);
}
140 
/* Resolve a pthread_t handle to its pool entry.
 *
 * Returns NULL if the handle lacks the "initialized" marker bit, indexes past
 * the pool, or refers to a slot that is not actually allocated (still in
 * ready_q, or exited-and-detached in done_q).
 */
struct posix_thread *to_posix_thread(pthread_t pthread)
{
	struct posix_thread *t;
	bool actually_initialized;
	size_t bit = get_posix_thread_idx(pthread);

	/* if the provided thread does not claim to be initialized, it's invalid */
	if (!is_pthread_obj_initialized(pthread)) {
		LOG_DBG("pthread is not initialized (%x)", pthread);
		return NULL;
	}

	if (bit >= ARRAY_SIZE(posix_thread_pool)) {
		LOG_DBG("Invalid pthread (%x)", pthread);
		return NULL;
	}

	t = &posix_thread_pool[bit];

	/*
	 * Denote a pthread as "initialized" (i.e. allocated) if it is not in ready_q.
	 * This differs from other posix object allocation strategies because they use
	 * a bitarray to indicate whether an object has been allocated.
	 */
	actually_initialized = !(posix_thread_q_get(t) == POSIX_THREAD_READY_Q ||
				 (posix_thread_q_get(t) == POSIX_THREAD_DONE_Q &&
				  t->attr.detachstate == PTHREAD_CREATE_DETACHED));

	if (!actually_initialized) {
		/* handle claimed initialized but the slot is not allocated */
		LOG_DBG("Pthread claims to be initialized (%x)", pthread);
		return NULL;
	}

	return &posix_thread_pool[bit];
}
176 
pthread_t pthread_self(void)
{
	size_t bit;
	struct posix_thread *t;

	/* struct posix_thread embeds its k_thread, so recover the container
	 * from the running kernel thread. NOTE(review): assumes the caller
	 * is a pool thread created via pthread_create(); for a native
	 * k_thread the resulting handle would be bogus — confirm callers.
	 */
	t = (struct posix_thread *)CONTAINER_OF(k_current_get(), struct posix_thread, thread);
	bit = posix_thread_to_offset(t);

	return mark_pthread_obj_initialized(bit);
}
187 
/* Two pthread handles are equal exactly when the scalar values match */
int pthread_equal(pthread_t pt1, pthread_t pt2)
{
	if (pt1 == pt2) {
		return 1;
	}

	return 0;
}
192 
__z_pthread_cleanup_init(struct __pthread_cleanup * c,void (* routine)(void * arg),void * arg)193 static inline void __z_pthread_cleanup_init(struct __pthread_cleanup *c, void (*routine)(void *arg),
194 					    void *arg)
195 {
196 	*c = (struct __pthread_cleanup){
197 		.routine = routine,
198 		.arg = arg,
199 		.node = {0},
200 	};
201 }
202 
/* Backend of pthread_cleanup_push(): @p cleanup is caller-provided storage
 * (three pointers, allocated by the macro) that is initialized here and
 * pushed onto the calling thread's cleanup stack.
 */
void __z_pthread_cleanup_push(void *cleanup[3], void (*routine)(void *arg), void *arg)
{
	struct posix_thread *t = NULL;
	struct __pthread_cleanup *const c = (struct __pthread_cleanup *)cleanup;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		/* the macro's 3-pointer storage must fit the record exactly */
		BUILD_ASSERT(3 * sizeof(void *) == sizeof(*c));
		__ASSERT_NO_MSG(t != NULL);
		__ASSERT_NO_MSG(c != NULL);
		__ASSERT_NO_MSG(routine != NULL);
		__z_pthread_cleanup_init(c, routine, arg);
		/* LIFO: most recently pushed handler pops first */
		sys_slist_prepend(&t->cleanup_list, &c->node);
	}
}
218 
/* Backend of pthread_cleanup_pop(): unlink the most recently pushed handler
 * and, if @p execute is nonzero, run it. The handler runs after the pool
 * lock is released so it may itself call pthread APIs.
 */
void __z_pthread_cleanup_pop(int execute)
{
	sys_snode_t *node;
	struct __pthread_cleanup *c = NULL;
	struct posix_thread *t = NULL;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		__ASSERT_NO_MSG(t != NULL);
		node = sys_slist_get(&t->cleanup_list);
		__ASSERT_NO_MSG(node != NULL);
		c = CONTAINER_OF(node, struct __pthread_cleanup, node);
		__ASSERT_NO_MSG(c != NULL);
		__ASSERT_NO_MSG(c->routine != NULL);
	}
	if (execute) {
		c->routine(c->arg);
	}
}
238 
/* True iff @p priority lies in the valid [min, max] range for @p policy */
static bool is_posix_policy_prio_valid(int priority, int policy)
{
	if (priority < posix_sched_priority_min(policy) ||
	    priority > posix_sched_priority_max(policy)) {
		LOG_DBG("Invalid priority %d and / or policy %d", priority, policy);
		return false;
	}

	return true;
}
250 
251 /* Non-static so that they can be tested in ztest */
/* Non-static so that they can be tested in ztest */
/* Convert a Zephyr priority to a POSIX priority, reporting the implied
 * policy through @p policy (cooperative -> SCHED_FIFO, preemptible ->
 * SCHED_RR). Asserts that @p z_prio is within the configured ranges.
 */
int zephyr_to_posix_priority(int z_prio, int *policy)
{
	int priority;

	if (z_prio < 0) {
		__ASSERT_NO_MSG(-z_prio <= CONFIG_NUM_COOP_PRIORITIES);
	} else {
		__ASSERT_NO_MSG(z_prio < CONFIG_NUM_PREEMPT_PRIORITIES);
	}

	*policy = (z_prio < 0) ? SCHED_FIFO : SCHED_RR;
	priority = ZEPHYR_TO_POSIX_PRIORITY(z_prio);
	__ASSERT_NO_MSG(is_posix_policy_prio_valid(priority, *policy));

	return priority;
}
268 
269 /* Non-static so that they can be tested in ztest */
/* Non-static so that they can be tested in ztest */
/* Convert a POSIX priority/policy pair to a Zephyr priority; inverse of
 * zephyr_to_posix_priority(). Asserts the pair is valid.
 */
int posix_to_zephyr_priority(int priority, int policy)
{
	__ASSERT_NO_MSG(is_posix_policy_prio_valid(priority, policy));

	return POSIX_TO_ZEPHYR_PRIORITY(priority, policy);
}
276 
/* An attr is "runnable" when a thread could actually be spawned from it:
 * it has a stack of at least PTHREAD_STACK_MIN bytes and a valid scheduling
 * policy.
 */
static bool __attr_is_runnable(const struct posix_thread_attr *attr)
{
	size_t stacksize;

	if (attr == NULL || attr->stack == NULL) {
		LOG_DBG("attr %p is not initialized", attr);
		return false;
	}

	stacksize = __get_attr_stacksize(attr);
	if (stacksize < PTHREAD_STACK_MIN) {
		LOG_DBG("attr %p has stacksize %zu is smaller than PTHREAD_STACK_MIN (%zu)", attr,
			stacksize, (size_t)PTHREAD_STACK_MIN);
		return false;
	}

	/* require a valid scheduler policy */
	if (!valid_posix_policy(attr->schedpolicy)) {
		LOG_DBG("Invalid scheduler policy %d", attr->schedpolicy);
		return false;
	}

	return true;
}
301 
__attr_is_initialized(const struct posix_thread_attr * attr)302 static bool __attr_is_initialized(const struct posix_thread_attr *attr)
303 {
304 	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD)) {
305 		return __attr_is_runnable(attr);
306 	}
307 
308 	if (attr == NULL || !attr->initialized) {
309 		LOG_DBG("attr %p is not initialized", attr);
310 		return false;
311 	}
312 
313 	return true;
314 }
315 
316 /**
317  * @brief Set scheduling parameter attributes in thread attributes object.
318  *
319  * See IEEE 1003.1
320  */
int pthread_attr_setschedparam(pthread_attr_t *_attr, const struct sched_param *schedparam)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	/* the priority must be valid for the attr's current policy */
	if (!__attr_is_initialized(attr) || schedparam == NULL ||
	    !is_posix_policy_prio_valid(schedparam->sched_priority, attr->schedpolicy)) {
		LOG_DBG("Invalid pthread_attr_t or sched_param");
		return EINVAL;
	}

	attr->priority = schedparam->sched_priority;
	return 0;
}
334 
335 /**
336  * @brief Set stack attributes in thread attributes object.
337  *
338  * See IEEE 1003.1
339  */
int pthread_attr_setstack(pthread_attr_t *_attr, void *stackaddr, size_t stacksize)
{
	int ret;
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	/* EACCES (not EINVAL) for an inaccessible stack, per IEEE 1003.1 */
	if (stackaddr == NULL) {
		LOG_DBG("NULL stack address");
		return EACCES;
	}

	if (!__attr_is_initialized(attr) || stacksize == 0 || stacksize < PTHREAD_STACK_MIN ||
	    stacksize > PTHREAD_STACK_MAX) {
		LOG_DBG("Invalid stacksize %zu", stacksize);
		return EINVAL;
	}

	/* release any stack previously auto-allocated by pthread_attr_init();
	 * k_thread_stack_free() fails harmlessly for a user-provided stack
	 */
	if (attr->stack != NULL) {
		ret = k_thread_stack_free(attr->stack);
		if (ret == 0) {
			LOG_DBG("Freed attr %p thread stack %zu@%p", _attr,
				__get_attr_stacksize(attr), attr->stack);
		}
	}

	attr->stack = stackaddr;
	__set_attr_stacksize(attr, stacksize);

	LOG_DBG("Assigned thread stack %zu@%p to attr %p", __get_attr_stacksize(attr), attr->stack,
		_attr);

	return 0;
}
372 
373 /**
374  * @brief Get scope attributes in thread attributes object.
375  *
376  * See IEEE 1003.1
377  */
pthread_attr_getscope(const pthread_attr_t * _attr,int * contentionscope)378 int pthread_attr_getscope(const pthread_attr_t *_attr, int *contentionscope)
379 {
380 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
381 
382 	if (!__attr_is_initialized(attr) || contentionscope == NULL) {
383 		return EINVAL;
384 	}
385 	*contentionscope = attr->contentionscope;
386 	return 0;
387 }
388 
389 /**
390  * @brief Set scope attributes in thread attributes object.
391  *
392  * See IEEE 1003.1
393  */
/* Set the contention scope; only PTHREAD_SCOPE_SYSTEM is supported */
int pthread_attr_setscope(pthread_attr_t *_attr, int contentionscope)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		LOG_DBG("attr %p is not initialized", attr);
		return EINVAL;
	}

	switch (contentionscope) {
	case PTHREAD_SCOPE_SYSTEM:
		attr->contentionscope = contentionscope;
		return 0;
	case PTHREAD_SCOPE_PROCESS:
		/* Zephyr does not yet support processes or process scheduling */
		LOG_DBG("%s contentionscope %d", "Unsupported", contentionscope);
		return ENOTSUP;
	default:
		LOG_DBG("%s contentionscope %d", "Invalid", contentionscope);
		return EINVAL;
	}
}
415 
416 /**
417  * @brief Get inherit scheduler attributes in thread attributes object.
418  *
419  * See IEEE 1003.1
420  */
pthread_attr_getinheritsched(const pthread_attr_t * _attr,int * inheritsched)421 int pthread_attr_getinheritsched(const pthread_attr_t *_attr, int *inheritsched)
422 {
423 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
424 
425 	if (!__attr_is_initialized(attr) || inheritsched == NULL) {
426 		return EINVAL;
427 	}
428 	*inheritsched = attr->inheritsched;
429 	return 0;
430 }
431 
432 /**
433  * @brief Set inherit scheduler attributes in thread attributes object.
434  *
435  * See IEEE 1003.1
436  */
/* Select whether a new thread inherits the creator's scheduling settings
 * (PTHREAD_INHERIT_SCHED) or takes them from the attr (PTHREAD_EXPLICIT_SCHED)
 */
int pthread_attr_setinheritsched(pthread_attr_t *_attr, int inheritsched)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		LOG_DBG("attr %p is not initialized", attr);
		return EINVAL;
	}

	switch (inheritsched) {
	case PTHREAD_INHERIT_SCHED:
	case PTHREAD_EXPLICIT_SCHED:
		attr->inheritsched = inheritsched;
		return 0;
	default:
		LOG_DBG("Invalid inheritsched %d", inheritsched);
		return EINVAL;
	}
}
454 
/* Workqueue trampoline: runs the thread recycler off the system workqueue
 * some time after a thread finishes (see posix_thread_finalize()).
 */
static void posix_thread_recycle_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	posix_thread_recycle();
}
460 static K_WORK_DELAYABLE_DEFINE(posix_thread_recycle_work, posix_thread_recycle_work_handler);
461 
462 extern struct sys_sem pthread_key_lock;
463 
/* Common termination path for a pthread (exit, cancellation):
 * - run key destructors and free the thread's pthread_key data,
 * - move the thread from run_q to done_q and record @p retval,
 * - schedule deferred recycling,
 * - abort the underlying k_thread (does not return when @p t is the caller).
 */
static void posix_thread_finalize(struct posix_thread *t, void *retval)
{
	sys_snode_t *node_l, *node_s;
	pthread_key_obj *key_obj;
	pthread_thread_data *thread_spec_data;
	sys_snode_t *node_key_data, *node_key_data_s, *node_key_data_prev = NULL;
	struct pthread_key_data *key_data;

	SYS_SLIST_FOR_EACH_NODE_SAFE(&t->key_list, node_l, node_s) {
		thread_spec_data = (pthread_thread_data *)node_l;
		if (thread_spec_data != NULL) {
			key_obj = thread_spec_data->key;
			/* destructor runs outside the key lock */
			if (key_obj->destructor != NULL) {
				(key_obj->destructor)(thread_spec_data->spec_data);
			}

			/* unlink and free this thread's record from the key's
			 * per-thread data list
			 */
			SYS_SEM_LOCK(&pthread_key_lock) {
				SYS_SLIST_FOR_EACH_NODE_SAFE(
					&key_obj->key_data_l,
					node_key_data,
					node_key_data_s) {
					key_data = (struct pthread_key_data *)node_key_data;
					if (&key_data->thread_data == thread_spec_data) {
						sys_slist_remove(
							&key_obj->key_data_l,
							node_key_data_prev,
							node_key_data
						);
						k_free(key_data);
						break;
					}
					node_key_data_prev = node_key_data;
				}
			}
		}
	}

	/* move thread from run_q to done_q */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		sys_dlist_remove(&t->q_node);
		posix_thread_q_set(t, POSIX_THREAD_DONE_Q);
		t->retval = retval;
	}

	/* trigger recycle work */
	(void)k_work_schedule(&posix_thread_recycle_work, K_MSEC(CONFIG_PTHREAD_RECYCLER_DELAY_MS));

	/* abort the underlying k_thread */
	k_thread_abort(&t->thread);
}
514 
/* Zephyr-level entry point of every pthread: arg1 is the user argument,
 * arg2 the start routine, arg3 the create-barrier handle (when enabled).
 */
FUNC_NORETURN
static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3)
{
	int err;
	/* NOTE(review): holds a pthread_barrier_t handle smuggled through a
	 * pointer — assumes the handle fits in an int; confirm against the
	 * pthread_barrier_t definition.
	 */
	int barrier;
	void *(*fun_ptr)(void *arg) = arg2;
	struct posix_thread *t = CONTAINER_OF(k_current_get(), struct posix_thread, thread);

	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		/* cross the barrier so that pthread_create() can continue */
		barrier = POINTER_TO_UINT(arg3);
		err = pthread_barrier_wait(&barrier);
		__ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
	}

	/* run the start routine, then terminate with its return value */
	posix_thread_finalize(t, fun_ptr(arg1));

	CODE_UNREACHABLE;
}
534 
/* Return every exited, detached thread from done_q to ready_q so its slot
 * can be reused by pthread_create(). Attr teardown happens outside the pool
 * lock because pthread_attr_destroy() may free a stack.
 */
static void posix_thread_recycle(void)
{
	struct posix_thread *t;
	struct posix_thread *safe_t;
	sys_dlist_t recyclables = SYS_DLIST_STATIC_INIT(&recyclables);

	/* phase 1: collect recyclable threads under the lock */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&posix_thread_q[POSIX_THREAD_DONE_Q], t, safe_t,
						  q_node) {
			if (t->attr.detachstate == PTHREAD_CREATE_JOINABLE) {
				/* thread has not been joined yet */
				continue;
			}

			sys_dlist_remove(&t->q_node);
			sys_dlist_append(&recyclables, &t->q_node);
		}
	}

	if (sys_dlist_is_empty(&recyclables)) {
		return;
	}

	LOG_DBG("Recycling %zu threads", sys_dlist_len(&recyclables));

	/* phase 2: release attr resources without holding the lock */
	SYS_DLIST_FOR_EACH_CONTAINER(&recyclables, t, q_node) {
		if (t->attr.caller_destroys) {
			t->attr = (struct posix_thread_attr){0};
		} else {
			(void)pthread_attr_destroy((pthread_attr_t *)&t->attr);
		}
	}

	/* phase 3: return the slots to ready_q */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		while (!sys_dlist_is_empty(&recyclables)) {
			t = CONTAINER_OF(sys_dlist_get(&recyclables), struct posix_thread, q_node);
			posix_thread_q_set(t, POSIX_THREAD_READY_Q);
		}
	}
}
575 
576 /**
577  * @brief Create a new thread.
578  *
579  * Pthread attribute should not be NULL. API will return Error on NULL
580  * attribute value.
581  *
582  * See IEEE 1003.1
583  */
int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadroutine)(void *),
		   void *arg)
{
	int err;
	pthread_barrier_t barrier;
	struct posix_thread *t = NULL;

	/* an explicit attr must be fully runnable; NULL means "use defaults" */
	if (!(_attr == NULL || __attr_is_runnable((struct posix_thread_attr *)_attr))) {
		return EINVAL;
	}

	/* reclaim resources greedily */
	posix_thread_recycle();

	/* claim a slot from ready_q, if any */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		if (!sys_dlist_is_empty(&posix_thread_q[POSIX_THREAD_READY_Q])) {
			t = CONTAINER_OF(sys_dlist_get(&posix_thread_q[POSIX_THREAD_READY_Q]),
					 struct posix_thread, q_node);

			/* initialize thread state */
			posix_thread_q_set(t, POSIX_THREAD_RUN_Q);
			sys_slist_init(&t->key_list);
			sys_slist_init(&t->cleanup_list);
		}
	}

	if (t != NULL && IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		err = pthread_barrier_init(&barrier, NULL, 2);
		if (err != 0) {
			/* cannot allocate barrier. move thread back to ready_q */
			SYS_SEM_LOCK(&pthread_pool_lock) {
				sys_dlist_remove(&t->q_node);
				posix_thread_q_set(t, POSIX_THREAD_READY_Q);
			}
			t = NULL;
		}
	}

	if (t == NULL) {
		/* no threads are ready */
		LOG_DBG("No threads are ready");
		return EAGAIN;
	}

	if (_attr == NULL) {
		/* default attr; may auto-allocate a stack (CONFIG_DYNAMIC_THREAD) */
		err = pthread_attr_init((pthread_attr_t *)&t->attr);
		if (err == 0 && !__attr_is_runnable(&t->attr)) {
			(void)pthread_attr_destroy((pthread_attr_t *)&t->attr);
			err = EINVAL;
		}
		if (err != 0) {
			/* cannot allocate pthread attributes (e.g. stack) */
			SYS_SEM_LOCK(&pthread_pool_lock) {
				sys_dlist_remove(&t->q_node);
				posix_thread_q_set(t, POSIX_THREAD_READY_Q);
			}
			return err;
		}
		/* caller not responsible for destroying attr */
		t->attr.caller_destroys = false;
	} else {
		/* copy user-provided attr into thread, caller must destroy attr at a later time */
		t->attr = *(struct posix_thread_attr *)_attr;
	}

	/* inherit the creator's priority/policy when requested */
	if (t->attr.inheritsched == PTHREAD_INHERIT_SCHED) {
		int pol;

		t->attr.priority =
			zephyr_to_posix_priority(k_thread_priority_get(k_current_get()), &pol);
		t->attr.schedpolicy = pol;
	}

	/* spawn the thread */
	k_thread_create(
		&t->thread, t->attr.stack, __get_attr_stacksize(&t->attr) + t->attr.guardsize,
		zephyr_thread_wrapper, (void *)arg, threadroutine,
		IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER) ? UINT_TO_POINTER(barrier) : NULL,
		posix_to_zephyr_priority(t->attr.priority, t->attr.schedpolicy), 0, K_NO_WAIT);

	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		/* wait for the spawned thread to cross our barrier */
		err = pthread_barrier_wait(&barrier);
		__ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
		err = pthread_barrier_destroy(&barrier);
		__ASSERT_NO_MSG(err == 0);
	}

	/* finally provide the initialized thread to the caller */
	*th = mark_pthread_obj_initialized(posix_thread_to_offset(t));

	LOG_DBG("Created pthread %p", &t->thread);

	return 0;
}
679 
/* Return the advisory concurrency level last set with
 * pthread_setconcurrency() (0 until it is set).
 */
int pthread_getconcurrency(void)
{
	int ret = 0;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		ret = pthread_concurrency;
	}

	return ret;
}
690 
/* Record the advisory concurrency level. The value does not affect
 * scheduling; levels beyond the CPU count are rejected with EAGAIN.
 */
int pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}

	if (new_level > CONFIG_MP_MAX_NUM_CPUS) {
		return EAGAIN;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		pthread_concurrency = new_level;
	}

	return 0;
}
707 
708 /**
709  * @brief Set cancelability State.
710  *
711  * See IEEE 1003.1
712  */
pthread_setcancelstate(int state,int * oldstate)713 int pthread_setcancelstate(int state, int *oldstate)
714 {
715 	int ret = EINVAL;
716 	bool cancel_pending = false;
717 	struct posix_thread *t = NULL;
718 	bool cancel_type = -1;
719 
720 	if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE) {
721 		LOG_DBG("Invalid pthread state %d", state);
722 		return EINVAL;
723 	}
724 
725 	SYS_SEM_LOCK(&pthread_pool_lock) {
726 		t = to_posix_thread(pthread_self());
727 		if (t == NULL) {
728 			ret = EINVAL;
729 			SYS_SEM_LOCK_BREAK;
730 		}
731 
732 		if (oldstate != NULL) {
733 			*oldstate = t->attr.cancelstate;
734 		}
735 
736 		t->attr.cancelstate = state;
737 		cancel_pending = t->attr.cancelpending;
738 		cancel_type = t->attr.canceltype;
739 
740 		ret = 0;
741 	}
742 
743 	if (ret == 0 && state == PTHREAD_CANCEL_ENABLE &&
744 	    cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS && cancel_pending) {
745 		posix_thread_finalize(t, PTHREAD_CANCELED);
746 	}
747 
748 	return ret;
749 }
750 
751 /**
752  * @brief Set cancelability Type.
753  *
754  * See IEEE 1003.1
755  */
int pthread_setcanceltype(int type, int *oldtype)
{
	int ret = EINVAL;
	struct posix_thread *t;

	if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS) {
		LOG_DBG("Invalid pthread cancel type %d", type);
		return EINVAL;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			ret = EINVAL;
			SYS_SEM_LOCK_BREAK;
		}

		/* report the previous type before overwriting it */
		if (oldtype != NULL) {
			*oldtype = t->attr.canceltype;
		}
		t->attr.canceltype = type;

		ret = 0;
	}

	return ret;
}
783 
784 /**
785  * @brief Create a cancellation point in the calling thread.
786  *
787  * See IEEE 1003.1
788  */
void pthread_testcancel(void)
{
	bool cancel_pended = false;
	struct posix_thread *t = NULL;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			SYS_SEM_LOCK_BREAK;
		}
		if (t->attr.cancelstate != PTHREAD_CANCEL_ENABLE) {
			SYS_SEM_LOCK_BREAK;
		}
		if (t->attr.cancelpending) {
			cancel_pended = true;
			/* disable further cancellation while tearing down */
			t->attr.cancelstate = PTHREAD_CANCEL_DISABLE;
		}
	}

	/* finalize outside the lock; does not return for the calling thread */
	if (cancel_pended) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}
}
812 
813 /**
814  * @brief Cancel execution of a thread.
815  *
816  * See IEEE 1003.1
817  */
int pthread_cancel(pthread_t pthread)
{
	int ret = ESRCH;
	/* cancelstate/canceltype hold int-valued constants; the previous
	 * `bool` locals only worked because the constants happened to fit in
	 * 0/1 — use int so no implicit narrowing occurs.
	 */
	int cancel_state = PTHREAD_CANCEL_ENABLE;
	int cancel_type = PTHREAD_CANCEL_DEFERRED;
	struct posix_thread *t = NULL;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		if (!__attr_is_initialized(&t->attr)) {
			/* thread has already terminated */
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/* record the request; deferred threads act on it at the next
		 * cancellation point
		 */
		ret = 0;
		t->attr.cancelpending = true;
		cancel_state = t->attr.cancelstate;
		cancel_type = t->attr.canceltype;
	}

	/* asynchronously-cancelable targets are torn down immediately */
	if (ret == 0 && cancel_state == PTHREAD_CANCEL_ENABLE &&
	    cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}

	return ret;
}
851 
852 /**
853  * @brief Set thread scheduling policy and parameters.
854  *
855  * See IEEE 1003.1
856  */
int pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param)
{
	int ret = ESRCH;
	int new_prio = K_LOWEST_APPLICATION_THREAD_PRIO;
	struct posix_thread *t = NULL;

	if (param == NULL || !valid_posix_policy(policy) ||
	    !is_posix_policy_prio_valid(param->sched_priority, policy)) {
		return EINVAL;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		ret = 0;
		new_prio = posix_to_zephyr_priority(param->sched_priority, policy);
	}

	/* apply the priority outside the lock */
	if (ret == 0) {
		k_thread_priority_set(&t->thread, new_prio);
	}

	return ret;
}
885 
886 /**
887  * @brief Set thread scheduling priority.
888  *
889  * See IEEE 1003.1
890  */
int pthread_setschedprio(pthread_t thread, int prio)
{
	int ret;
	int new_prio = K_LOWEST_APPLICATION_THREAD_PRIO;
	struct posix_thread *t = NULL;
	int policy = -1;
	struct sched_param param;

	/* the target's current policy determines the valid priority range */
	ret = pthread_getschedparam(thread, &policy, &param);
	if (ret != 0) {
		return ret;
	}

	if (!is_posix_policy_prio_valid(prio, policy)) {
		return EINVAL;
	}

	ret = ESRCH;
	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(thread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		ret = 0;
		new_prio = posix_to_zephyr_priority(prio, policy);
	}

	/* apply the priority outside the lock */
	if (ret == 0) {
		k_thread_priority_set(&t->thread, new_prio);
	}

	return ret;
}
926 
927 /**
928  * @brief Initialise threads attribute object
929  *
930  * See IEEE 1003.1
931  */
int pthread_attr_init(pthread_attr_t *_attr)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	/* ENOMEM (not EINVAL) is the pthread_attr_init() failure code */
	if (attr == NULL) {
		LOG_DBG("Invalid attr pointer");
		return ENOMEM;
	}

	/* the auto-allocated stack size must fit the stacksize bitfield */
	BUILD_ASSERT(DYNAMIC_STACK_SIZE <= PTHREAD_STACK_MAX);

	*attr = (struct posix_thread_attr){0};
	attr->guardsize = CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT;
	attr->contentionscope = PTHREAD_SCOPE_SYSTEM;
	attr->inheritsched = PTHREAD_INHERIT_SCHED;

	/* with CONFIG_DYNAMIC_THREAD, try to auto-allocate a stack; failure
	 * here is not fatal — the caller may still set one explicitly
	 */
	if (DYNAMIC_STACK_SIZE > 0) {
		attr->stack = k_thread_stack_alloc(DYNAMIC_STACK_SIZE + attr->guardsize,
						   k_is_user_context() ? K_USER : 0);
		if (attr->stack == NULL) {
			LOG_DBG("Did not auto-allocate thread stack");
		} else {
			__set_attr_stacksize(attr, DYNAMIC_STACK_SIZE);
			__ASSERT_NO_MSG(__attr_is_initialized(attr));
			LOG_DBG("Allocated thread stack %zu@%p", __get_attr_stacksize(attr),
				attr->stack);
		}
	}

	/* caller responsible for destroying attr */
	attr->initialized = true;

	LOG_DBG("Initialized attr %p", _attr);

	return 0;
}
968 
969 /**
970  * @brief Get thread scheduling policy and parameters
971  *
972  * See IEEE 1003.1
973  */
int pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
{
	int ret = ESRCH;
	struct posix_thread *t;

	if (policy == NULL || param == NULL) {
		return EINVAL;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		if (!__attr_is_initialized(&t->attr)) {
			/* thread has already terminated */
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/* derive both outputs from the live kernel priority */
		ret = 0;
		param->sched_priority =
			zephyr_to_posix_priority(k_thread_priority_get(&t->thread), policy);
	}

	return ret;
}
1002 
1003 /**
1004  * @brief Dynamic package initialization
1005  *
1006  * See IEEE 1003.1
1007  */
/* Run @p init_func exactly once per @p once object, no matter how many
 * threads call in; the flag is tested and set under the pool lock, and
 * init_func runs after the lock is released.
 */
int pthread_once(pthread_once_t *once, void (*init_func)(void))
{
	int ret = EINVAL;
	bool must_init = false;
	struct pthread_once *const _once = (struct pthread_once *)once;

	if (init_func == NULL) {
		return EINVAL;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		must_init = !_once->flag;
		_once->flag = true;
		ret = 0;
	}

	if (ret == 0 && must_init) {
		init_func();
	}

	return ret;
}
1032 
1033 /**
1034  * @brief Terminate calling thread.
1035  *
1036  * See IEEE 1003.1
1037  */
FUNC_NORETURN
void pthread_exit(void *retval)
{
	struct posix_thread *self = NULL;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		self = to_posix_thread(pthread_self());
		if (self == NULL) {
			SYS_SEM_LOCK_BREAK;
		}

		/* Mark a thread as cancellable before exiting */
		self->attr.cancelstate = PTHREAD_CANCEL_ENABLE;
	}

	/* a bare k_thread calling pthread_exit() is simply aborted */
	if (self == NULL) {
		/* not a valid posix_thread */
		LOG_DBG("Aborting non-pthread %p", k_current_get());
		k_thread_abort(k_current_get());

		CODE_UNREACHABLE;
	}

	posix_thread_finalize(self, retval);
	CODE_UNREACHABLE;
}
1064 
/*
 * Common implementation for pthread_join(), pthread_timedjoin_np() and
 * pthread_tryjoin_np(); @p timeout selects blocking, bounded, or polling join.
 *
 * Returns 0 on success, or EDEADLK / ESRCH / EINVAL / EBUSY / ETIMEDOUT.
 */
static int pthread_timedjoin_internal(pthread_t pthread, void **status, k_timeout_t timeout)
{
	int ret = ESRCH;
	struct posix_thread *t = NULL;

	if (pthread == pthread_self()) {
		LOG_DBG("Pthread attempted to join itself (%x)", pthread);
		return EDEADLK;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		LOG_DBG("Pthread %p joining..", &t->thread);

		if (t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
			/* undefined behaviour */
			ret = EINVAL;
			SYS_SEM_LOCK_BREAK;
		}

		if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/*
		 * thread is joinable and is in run_q or done_q.
		 * let's ensure that the thread cannot be joined again after this point.
		 */
		ret = 0;
		t->attr.detachstate = PTHREAD_CREATE_DETACHED;
	}

	switch (ret) {
	case ESRCH:
		/* t may be NULL here (invalid handle); do not form &t->thread */
		LOG_DBG("Pthread %x does not exist or was already joined", pthread);
		return ret;
	case EINVAL:
		LOG_DBG("Pthread %p is not joinable", &t->thread);
		return ret;
	case 0:
		break;
	}

	ret = k_thread_join(&t->thread, timeout);
	if (ret != 0) {
		/* when joining failed, ensure that the thread can be joined later */
		SYS_SEM_LOCK(&pthread_pool_lock) {
			t->attr.detachstate = PTHREAD_CREATE_JOINABLE;
		}
	}
	if (ret == -EBUSY) {
		return EBUSY;
	} else if (ret == -EAGAIN) {
		return ETIMEDOUT;
	}
	/* Can only be ok or -EDEADLK, which should never occur for pthreads */
	__ASSERT_NO_MSG(ret == 0);

	LOG_DBG("Joined pthread %p", &t->thread);

	if (status != NULL) {
		LOG_DBG("Writing status to %p", status);
		*status = t->retval;
	}

	posix_thread_recycle();

	return 0;
}
1140 
1141 /**
1142  * @brief Await a thread termination with timeout.
1143  *
1144  * Non-portable GNU extension of IEEE 1003.1
1145  */
pthread_timedjoin_np(pthread_t pthread,void ** status,const struct timespec * abstime)1146 int pthread_timedjoin_np(pthread_t pthread, void **status, const struct timespec *abstime)
1147 {
1148 	if (abstime == NULL) {
1149 		return EINVAL;
1150 	}
1151 
1152 	if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= NSEC_PER_SEC) {
1153 		return EINVAL;
1154 	}
1155 
1156 	return pthread_timedjoin_internal(pthread, status, K_MSEC(timespec_to_timeoutms(abstime)));
1157 }
1158 
1159 /**
1160  * @brief Check a thread for termination.
1161  *
1162  * Non-portable GNU extension of IEEE 1003.1
1163  */
pthread_tryjoin_np(pthread_t pthread,void ** status)1164 int pthread_tryjoin_np(pthread_t pthread, void **status)
1165 {
1166 	return pthread_timedjoin_internal(pthread, status, K_NO_WAIT);
1167 }
1168 
1169 /**
1170  * @brief Await a thread termination.
1171  *
1172  * See IEEE 1003.1
1173  */
pthread_join(pthread_t pthread,void ** status)1174 int pthread_join(pthread_t pthread, void **status)
1175 {
1176 	return pthread_timedjoin_internal(pthread, status, K_FOREVER);
1177 }
1178 
1179 /**
1180  * @brief Detach a thread.
1181  *
1182  * See IEEE 1003.1
1183  */
pthread_detach(pthread_t pthread)1184 int pthread_detach(pthread_t pthread)
1185 {
1186 	int ret = ESRCH;
1187 	struct posix_thread *t = NULL;
1188 
1189 	SYS_SEM_LOCK(&pthread_pool_lock) {
1190 		t = to_posix_thread(pthread);
1191 		if (t == NULL) {
1192 			ret = ESRCH;
1193 			SYS_SEM_LOCK_BREAK;
1194 		}
1195 
1196 		if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q ||
1197 		    t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
1198 			LOG_DBG("Pthread %p cannot be detached", &t->thread);
1199 			ret = EINVAL;
1200 			SYS_SEM_LOCK_BREAK;
1201 		}
1202 
1203 		ret = 0;
1204 		t->attr.detachstate = PTHREAD_CREATE_DETACHED;
1205 	}
1206 
1207 	if (ret == 0) {
1208 		LOG_DBG("Pthread %p detached", &t->thread);
1209 	}
1210 
1211 	return ret;
1212 }
1213 
1214 /**
1215  * @brief Get detach state attribute in thread attributes object.
1216  *
1217  * See IEEE 1003.1
1218  */
pthread_attr_getdetachstate(const pthread_attr_t * _attr,int * detachstate)1219 int pthread_attr_getdetachstate(const pthread_attr_t *_attr, int *detachstate)
1220 {
1221 	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1222 
1223 	if (!__attr_is_initialized(attr) || (detachstate == NULL)) {
1224 		return EINVAL;
1225 	}
1226 
1227 	*detachstate = attr->detachstate;
1228 	return 0;
1229 }
1230 
1231 /**
1232  * @brief Set detach state attribute in thread attributes object.
1233  *
1234  * See IEEE 1003.1
1235  */
pthread_attr_setdetachstate(pthread_attr_t * _attr,int detachstate)1236 int pthread_attr_setdetachstate(pthread_attr_t *_attr, int detachstate)
1237 {
1238 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1239 
1240 	if (!__attr_is_initialized(attr) || ((detachstate != PTHREAD_CREATE_DETACHED) &&
1241 					     (detachstate != PTHREAD_CREATE_JOINABLE))) {
1242 		return EINVAL;
1243 	}
1244 
1245 	attr->detachstate = detachstate;
1246 	return 0;
1247 }
1248 
1249 /**
1250  * @brief Get scheduling policy attribute in Thread attributes.
1251  *
1252  * See IEEE 1003.1
1253  */
pthread_attr_getschedpolicy(const pthread_attr_t * _attr,int * policy)1254 int pthread_attr_getschedpolicy(const pthread_attr_t *_attr, int *policy)
1255 {
1256 	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1257 
1258 	if (!__attr_is_initialized(attr) || (policy == NULL)) {
1259 		return EINVAL;
1260 	}
1261 
1262 	*policy = attr->schedpolicy;
1263 	return 0;
1264 }
1265 
1266 /**
1267  * @brief Set scheduling policy attribute in Thread attributes object.
1268  *
1269  * See IEEE 1003.1
1270  */
pthread_attr_setschedpolicy(pthread_attr_t * _attr,int policy)1271 int pthread_attr_setschedpolicy(pthread_attr_t *_attr, int policy)
1272 {
1273 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1274 
1275 	if (!__attr_is_initialized(attr) || !valid_posix_policy(policy)) {
1276 		return EINVAL;
1277 	}
1278 
1279 	attr->schedpolicy = policy;
1280 	return 0;
1281 }
1282 
1283 /**
1284  * @brief Get stack size attribute in thread attributes object.
1285  *
1286  * See IEEE 1003.1
1287  */
pthread_attr_getstacksize(const pthread_attr_t * _attr,size_t * stacksize)1288 int pthread_attr_getstacksize(const pthread_attr_t *_attr, size_t *stacksize)
1289 {
1290 	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1291 
1292 	if (!__attr_is_initialized(attr) || (stacksize == NULL)) {
1293 		return EINVAL;
1294 	}
1295 
1296 	*stacksize = __get_attr_stacksize(attr);
1297 	return 0;
1298 }
1299 
1300 /**
1301  * @brief Set stack size attribute in thread attributes object.
1302  *
1303  * See IEEE 1003.1
1304  */
pthread_attr_setstacksize(pthread_attr_t * _attr,size_t stacksize)1305 int pthread_attr_setstacksize(pthread_attr_t *_attr, size_t stacksize)
1306 {
1307 	int ret;
1308 	void *new_stack;
1309 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1310 
1311 	if (!__attr_is_initialized(attr) || stacksize == 0 || stacksize < PTHREAD_STACK_MIN ||
1312 	    stacksize > PTHREAD_STACK_MAX) {
1313 		return EINVAL;
1314 	}
1315 
1316 	if (__get_attr_stacksize(attr) == stacksize) {
1317 		return 0;
1318 	}
1319 
1320 	new_stack =
1321 		k_thread_stack_alloc(stacksize + attr->guardsize, k_is_user_context() ? K_USER : 0);
1322 	if (new_stack == NULL) {
1323 		if (stacksize < __get_attr_stacksize(attr)) {
1324 			__set_attr_stacksize(attr, stacksize);
1325 			return 0;
1326 		}
1327 
1328 		LOG_DBG("k_thread_stack_alloc(%zu) failed",
1329 			__get_attr_stacksize(attr) + attr->guardsize);
1330 		return ENOMEM;
1331 	}
1332 	LOG_DBG("Allocated thread stack %zu@%p", stacksize + attr->guardsize, new_stack);
1333 
1334 	if (attr->stack != NULL) {
1335 		ret = k_thread_stack_free(attr->stack);
1336 		if (ret == 0) {
1337 			LOG_DBG("Freed attr %p thread stack %zu@%p", _attr,
1338 				__get_attr_stacksize(attr), attr->stack);
1339 		}
1340 	}
1341 
1342 	__set_attr_stacksize(attr, stacksize);
1343 	attr->stack = new_stack;
1344 
1345 	return 0;
1346 }
1347 
1348 /**
1349  * @brief Get stack attributes in thread attributes object.
1350  *
1351  * See IEEE 1003.1
1352  */
pthread_attr_getstack(const pthread_attr_t * _attr,void ** stackaddr,size_t * stacksize)1353 int pthread_attr_getstack(const pthread_attr_t *_attr, void **stackaddr, size_t *stacksize)
1354 {
1355 	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1356 
1357 	if (!__attr_is_initialized(attr) || (stackaddr == NULL) || (stacksize == NULL)) {
1358 		return EINVAL;
1359 	}
1360 
1361 	*stackaddr = attr->stack;
1362 	*stacksize = __get_attr_stacksize(attr);
1363 	return 0;
1364 }
1365 
/**
 * @brief Get guard size attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getguardsize(const pthread_attr_t *ZRESTRICT _attr, size_t *ZRESTRICT guardsize)
{
	/* const-qualified: this getter never modifies the attr (was casting away const) */
	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || guardsize == NULL) {
		return EINVAL;
	}

	*guardsize = attr->guardsize;

	return 0;
}
1378 
/**
 * @brief Set guard size attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setguardsize(pthread_attr_t *_attr, size_t guardsize)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	if ((guardsize > PTHREAD_GUARD_MAX) || !__attr_is_initialized(attr)) {
		return EINVAL;
	}

	attr->guardsize = guardsize;

	return 0;
}
1391 
1392 /**
1393  * @brief Get thread attributes object scheduling parameters.
1394  *
1395  * See IEEE 1003.1
1396  */
pthread_attr_getschedparam(const pthread_attr_t * _attr,struct sched_param * schedparam)1397 int pthread_attr_getschedparam(const pthread_attr_t *_attr, struct sched_param *schedparam)
1398 {
1399 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1400 
1401 	if (!__attr_is_initialized(attr) || (schedparam == NULL)) {
1402 		return EINVAL;
1403 	}
1404 
1405 	schedparam->sched_priority = attr->priority;
1406 	return 0;
1407 }
1408 
1409 /**
1410  * @brief Destroy thread attributes object.
1411  *
1412  * See IEEE 1003.1
1413  */
pthread_attr_destroy(pthread_attr_t * _attr)1414 int pthread_attr_destroy(pthread_attr_t *_attr)
1415 {
1416 	int ret;
1417 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1418 
1419 	if (!__attr_is_initialized(attr)) {
1420 		return EINVAL;
1421 	}
1422 
1423 	ret = k_thread_stack_free(attr->stack);
1424 	if (ret == 0) {
1425 		LOG_DBG("Freed attr %p thread stack %zu@%p", _attr, __get_attr_stacksize(attr),
1426 			attr->stack);
1427 	}
1428 
1429 	*attr = (struct posix_thread_attr){0};
1430 	LOG_DBG("Destroyed attr %p", _attr);
1431 
1432 	return 0;
1433 }
1434 
/**
 * @brief Set the name of a thread (non-portable extension).
 *
 * Returns 0 on success, ESRCH for an invalid thread, EINVAL for a NULL name,
 * or a positive errno propagated from the kernel.
 */
int pthread_setname_np(pthread_t thread, const char *name)
{
#ifdef CONFIG_THREAD_NAME
	int ret;
	k_tid_t kthread;

	thread = get_posix_thread_idx(thread);
	if (thread >= ARRAY_SIZE(posix_thread_pool)) {
		return ESRCH;
	}

	kthread = &posix_thread_pool[thread].thread;

	if (name == NULL) {
		return EINVAL;
	}

	/* k_thread_name_set() reports errors as negative errno; POSIX wants positive */
	ret = k_thread_name_set(kthread, name);
	return (ret < 0) ? -ret : ret;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(name);
	return 0;
#endif
}
1458 
/**
 * @brief Copy the name of a thread into @p name (non-portable extension).
 *
 * The output buffer is always NUL-terminated on success. Returns 0 on
 * success, ESRCH for an invalid thread, EINVAL for a NULL/zero-length
 * buffer, or a positive errno propagated from the kernel.
 */
int pthread_getname_np(pthread_t thread, char *name, size_t len)
{
#ifdef CONFIG_THREAD_NAME
	int ret;
	k_tid_t kthread;

	thread = get_posix_thread_idx(thread);
	if (thread >= ARRAY_SIZE(posix_thread_pool)) {
		return ESRCH;
	}

	/* len == 0 would make (len - 1) wrap to SIZE_MAX below */
	if (name == NULL || len == 0) {
		return EINVAL;
	}

	memset(name, '\0', len);
	kthread = &posix_thread_pool[thread].thread;
	/* k_thread_name_copy() reports errors as negative errno; POSIX wants positive */
	ret = k_thread_name_copy(kthread, name, len - 1);
	return (ret < 0) ? -ret : ret;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(name);
	ARG_UNUSED(len);
	return 0;
#endif
}
1483 
/**
 * @brief Register fork handlers.
 *
 * Unsupported: this platform does not implement fork(), so registration
 * always fails with ENOSYS.
 */
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
	(void)prepare;
	(void)parent;
	(void)child;

	return ENOSYS;
}
1492 
/* this should probably go into signal.c but we need access to the lock */
/**
 * @brief Examine and/or change the calling thread's signal mask.
 *
 * See IEEE 1003.1. Returns 0 on success, EINVAL for a bad @p how, or ESRCH
 * when the caller is not a POSIX thread.
 */
int pthread_sigmask(int how, const sigset_t *ZRESTRICT set, sigset_t *ZRESTRICT oset)
{
	int ret = ESRCH;
	struct posix_thread *t = NULL;

	if (!(how == SIG_BLOCK || how == SIG_SETMASK || how == SIG_UNBLOCK)) {
		return EINVAL;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/* the previous mask is reported even when no new mask is applied */
		if (oset != NULL) {
			*oset = t->sigset;
		}

		ret = 0;
		/* a NULL set means "query only": leave the mask unchanged */
		if (set == NULL) {
			SYS_SEM_LOCK_BREAK;
		}

		switch (how) {
		case SIG_BLOCK:
			/* add the bits in *set to the current mask */
			for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
				t->sigset.sig[i] |= set->sig[i];
			}
			break;
		case SIG_SETMASK:
			/* replace the mask wholesale */
			t->sigset = *set;
			break;
		case SIG_UNBLOCK:
			/* clear every bit present in *set */
			for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
				t->sigset.sig[i] &= ~set->sig[i];
			}
			break;
		}
	}

	return ret;
}
1538 
posix_thread_pool_init(void)1539 static int posix_thread_pool_init(void)
1540 {
1541 	ARRAY_FOR_EACH_PTR(posix_thread_pool, th) {
1542 		posix_thread_q_set(th, POSIX_THREAD_READY_Q);
1543 	}
1544 
1545 	return 0;
1546 }
1547 SYS_INIT(posix_thread_pool_init, PRE_KERNEL_1, 0);
1548