1 /*
2  * Copyright (c) 2018 Intel Corporation
3  * Copyright (c) 2023 Meta
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include "posix_internal.h"
9 #include "pthread_sched.h"
10 
11 #include <stdio.h>
12 
13 #include <zephyr/init.h>
14 #include <zephyr/kernel.h>
15 #include <zephyr/logging/log.h>
16 #include <zephyr/sys/atomic.h>
17 #include <zephyr/posix/pthread.h>
18 #include <zephyr/posix/unistd.h>
19 #include <zephyr/sys/sem.h>
20 #include <zephyr/sys/slist.h>
21 #include <zephyr/sys/util.h>
22 
23 #define ZEPHYR_TO_POSIX_PRIORITY(_zprio)                                                           \
24 	(((_zprio) < 0) ? (-1 * ((_zprio) + 1)) : (CONFIG_NUM_PREEMPT_PRIORITIES - (_zprio)-1))
25 
26 #define POSIX_TO_ZEPHYR_PRIORITY(_prio, _pol)                                                      \
27 	(((_pol) == SCHED_FIFO) ? (-1 * ((_prio) + 1))                                             \
28 				: (CONFIG_NUM_PREEMPT_PRIORITIES - (_prio)-1))
29 
30 #define DEFAULT_PTHREAD_PRIORITY                                                                   \
31 	POSIX_TO_ZEPHYR_PRIORITY(K_LOWEST_APPLICATION_THREAD_PRIO, DEFAULT_PTHREAD_POLICY)
32 #define DEFAULT_PTHREAD_POLICY (IS_ENABLED(CONFIG_PREEMPT_ENABLED) ? SCHED_RR : SCHED_FIFO)
33 
34 #define PTHREAD_STACK_MAX BIT(CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS)
35 #define PTHREAD_GUARD_MAX BIT_MASK(CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_BITS)
36 
37 LOG_MODULE_REGISTER(pthread, CONFIG_PTHREAD_LOG_LEVEL);
38 
39 #ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
40 #define DYNAMIC_STACK_SIZE CONFIG_DYNAMIC_THREAD_STACK_SIZE
41 #else
42 #define DYNAMIC_STACK_SIZE 0
43 #endif
44 
__get_attr_stacksize(const struct posix_thread_attr * attr)45 static inline size_t __get_attr_stacksize(const struct posix_thread_attr *attr)
46 {
47 	return attr->stacksize + 1;
48 }
49 
/* Encode the stack size into the bitfield using the off-by-one storage scheme. */
static inline void __set_attr_stacksize(struct posix_thread_attr *attr, size_t stacksize)
{
	size_t stored = stacksize - 1;

	attr->stacksize = stored;
}
54 
/* One stacked cancellation-cleanup handler, overlaid on caller-provided storage
 * (see __z_pthread_cleanup_push()) and linked into a pthread's cleanup_list.
 */
struct __pthread_cleanup {
	void (*routine)(void *arg); /* handler run on pop(execute != 0) */
	void *arg;                  /* opaque argument passed to routine */
	sys_snode_t node;           /* linkage in struct posix_thread cleanup_list */
};
60 
/* Lifecycle state of a pooled pthread; doubles as an index into posix_thread_q[]. */
enum posix_thread_qid {
	/* ready to be started via pthread_create() */
	POSIX_THREAD_READY_Q,
	/* running */
	POSIX_THREAD_RUN_Q,
	/* exited (either joinable or detached) */
	POSIX_THREAD_DONE_Q,
	/* invalid - sentinel only, has no backing dlist in posix_thread_q[] */
	POSIX_THREAD_INVALID_Q,
};
71 
/* only 2 bits in struct posix_thread_attr for schedpolicy */
BUILD_ASSERT(SCHED_OTHER < BIT(2) && SCHED_FIFO < BIT(2) && SCHED_RR < BIT(2));

/* detachstate is stored in a single bit, so the two constants must be 0 and 1 */
BUILD_ASSERT((PTHREAD_CREATE_DETACHED == 0 || PTHREAD_CREATE_JOINABLE == 0) &&
	     (PTHREAD_CREATE_DETACHED == 1 || PTHREAD_CREATE_JOINABLE == 1));

/* cancelstate is stored in a single bit, so the two constants must be 0 and 1 */
BUILD_ASSERT((PTHREAD_CANCEL_ENABLE == 0 || PTHREAD_CANCEL_DISABLE == 0) &&
	     (PTHREAD_CANCEL_ENABLE == 1 || PTHREAD_CANCEL_DISABLE == 1));

/* stacksize + guardsize bitfields must together fit in a 32-bit word */
BUILD_ASSERT(CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS + CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_BITS <=
	     32);
83 
84 int64_t timespec_to_timeoutms(const struct timespec *abstime);
85 static void posix_thread_recycle(void);
86 
/* Per-state queues of pooled pthreads; indexed by enum posix_thread_qid */
__pinned_data
static sys_dlist_t posix_thread_q[] = {
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_READY_Q]),
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_RUN_Q]),
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_DONE_Q]),
};

/* Static pool backing every pthread_t; a handle is an index into this array */
static __pinned_bss struct posix_thread posix_thread_pool[CONFIG_POSIX_THREAD_THREADS_MAX];

/* Guards posix_thread_q[], per-thread attr state, and pthread_concurrency */
static SYS_SEM_DEFINE(pthread_pool_lock, 1, 1);
static int pthread_concurrency; /* advisory level for pthread_{get,set}concurrency() */
98 
posix_thread_q_set(struct posix_thread * t,enum posix_thread_qid qid)99 static inline void posix_thread_q_set(struct posix_thread *t, enum posix_thread_qid qid)
100 {
101 	switch (qid) {
102 	case POSIX_THREAD_READY_Q:
103 	case POSIX_THREAD_RUN_Q:
104 	case POSIX_THREAD_DONE_Q:
105 		sys_dlist_append(&posix_thread_q[qid], &t->q_node);
106 		t->qid = qid;
107 		break;
108 	default:
109 		__ASSERT(false, "cannot set invalid qid %d for posix thread %p", qid, t);
110 		break;
111 	}
112 }
113 
posix_thread_q_get(struct posix_thread * t)114 static inline enum posix_thread_qid posix_thread_q_get(struct posix_thread *t)
115 {
116 	switch (t->qid) {
117 	case POSIX_THREAD_READY_Q:
118 	case POSIX_THREAD_RUN_Q:
119 	case POSIX_THREAD_DONE_Q:
120 		return t->qid;
121 	default:
122 		__ASSERT(false, "posix thread %p has invalid qid: %d", t, t->qid);
123 		return POSIX_THREAD_INVALID_Q;
124 	}
125 }
126 
/*
 * We reserve the MSB to mark a pthread_t as initialized (from the
 * perspective of the application). With a linear space, this means that
 * the theoretical pthread_t range is [0,2147483647].
 * Pool indices must therefore stay strictly below the marker bit.
 */
BUILD_ASSERT(CONFIG_POSIX_THREAD_THREADS_MAX < PTHREAD_OBJ_MASK_INIT,
	     "CONFIG_POSIX_THREAD_THREADS_MAX is too high");
134 
posix_thread_to_offset(struct posix_thread * t)135 static inline size_t posix_thread_to_offset(struct posix_thread *t)
136 {
137 	return t - posix_thread_pool;
138 }
139 
/* Strip the "initialized" marker bit from a pthread_t, yielding the pool index. */
static inline size_t get_posix_thread_idx(pthread_t pth)
{
	return mark_pthread_obj_uninitialized(pth);
}
144 
/*
 * Resolve a user-visible pthread_t handle to its pool entry.
 *
 * Returns NULL when the handle lacks the initialized marker, is out of range,
 * or refers to a slot that is not currently allocated to a live thread.
 */
struct posix_thread *to_posix_thread(pthread_t pthread)
{
	struct posix_thread *t;
	enum posix_thread_qid qid;
	size_t idx = get_posix_thread_idx(pthread);

	/* if the provided thread does not claim to be initialized, its invalid */
	if (!is_pthread_obj_initialized(pthread)) {
		LOG_DBG("pthread is not initialized (%x)", pthread);
		return NULL;
	}

	if (idx >= ARRAY_SIZE(posix_thread_pool)) {
		LOG_DBG("Invalid pthread (%x)", pthread);
		return NULL;
	}

	t = &posix_thread_pool[idx];

	/*
	 * Denote a pthread as "initialized" (i.e. allocated) if it is not in ready_q.
	 * This differs from other posix object allocation strategies because they use
	 * a bitarray to indicate whether an object has been allocated.
	 * A detached thread that already reached done_q also counts as unallocated.
	 */
	qid = posix_thread_q_get(t);
	if (qid == POSIX_THREAD_READY_Q ||
	    (qid == POSIX_THREAD_DONE_Q && t->attr.detachstate == PTHREAD_CREATE_DETACHED)) {
		LOG_DBG("Pthread claims to be initialized (%x)", pthread);
		return NULL;
	}

	return t;
}
180 
pthread_self(void)181 pthread_t pthread_self(void)
182 {
183 	size_t bit;
184 	struct posix_thread *t;
185 
186 	t = (struct posix_thread *)CONTAINER_OF(k_current_get(), struct posix_thread, thread);
187 	bit = posix_thread_to_offset(t);
188 
189 	return mark_pthread_obj_initialized(bit);
190 }
191 
/* pthread_t is an integral handle here, so equality is a plain comparison. */
int pthread_equal(pthread_t pt1, pthread_t pt2)
{
	return (pt1 == pt2) ? 1 : 0;
}
196 
__z_pthread_cleanup_init(struct __pthread_cleanup * c,void (* routine)(void * arg),void * arg)197 static inline void __z_pthread_cleanup_init(struct __pthread_cleanup *c, void (*routine)(void *arg),
198 					    void *arg)
199 {
200 	*c = (struct __pthread_cleanup){
201 		.routine = routine,
202 		.arg = arg,
203 		.node = {0},
204 	};
205 }
206 
/*
 * Back-end for pthread_cleanup_push(): overlay a cleanup record on the three
 * caller-provided pointers of storage and prepend it to the calling thread's
 * cleanup list (LIFO - the most recently pushed handler pops first).
 */
void __z_pthread_cleanup_push(void *cleanup[3], void (*routine)(void *arg), void *arg)
{
	struct posix_thread *t = NULL;
	struct __pthread_cleanup *const c = (struct __pthread_cleanup *)cleanup;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		/* the record must fit exactly into the caller's 3-pointer buffer */
		BUILD_ASSERT(3 * sizeof(void *) == sizeof(*c));
		__ASSERT_NO_MSG(t != NULL);
		__ASSERT_NO_MSG(c != NULL);
		__ASSERT_NO_MSG(routine != NULL);
		__z_pthread_cleanup_init(c, routine, arg);
		sys_slist_prepend(&t->cleanup_list, &c->node);
	}
}
222 
/*
 * Back-end for pthread_cleanup_pop(): unlink the most recently pushed cleanup
 * handler; invoke it iff execute is non-zero. The handler runs outside the
 * pool lock so that it may itself call pthread APIs.
 */
void __z_pthread_cleanup_pop(int execute)
{
	sys_snode_t *node;
	struct __pthread_cleanup *c = NULL;
	struct posix_thread *t = NULL;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		__ASSERT_NO_MSG(t != NULL);
		node = sys_slist_get(&t->cleanup_list);
		__ASSERT_NO_MSG(node != NULL);
		c = CONTAINER_OF(node, struct __pthread_cleanup, node);
		__ASSERT_NO_MSG(c != NULL);
		__ASSERT_NO_MSG(c->routine != NULL);
	}
	if (execute) {
		c->routine(c->arg);
	}
}
242 
is_posix_policy_prio_valid(int priority,int policy)243 static bool is_posix_policy_prio_valid(int priority, int policy)
244 {
245 	if (priority >= posix_sched_priority_min(policy) &&
246 	    priority <= posix_sched_priority_max(policy)) {
247 		return true;
248 	}
249 
250 	LOG_DBG("Invalid priority %d and / or policy %d", priority, policy);
251 
252 	return false;
253 }
254 
255 /* Non-static so that they can be tested in ztest */
zephyr_to_posix_priority(int z_prio,int * policy)256 int zephyr_to_posix_priority(int z_prio, int *policy)
257 {
258 	int priority;
259 
260 	if (z_prio < 0) {
261 		__ASSERT_NO_MSG(-z_prio <= CONFIG_NUM_COOP_PRIORITIES);
262 	} else {
263 		__ASSERT_NO_MSG(z_prio < CONFIG_NUM_PREEMPT_PRIORITIES);
264 	}
265 
266 	*policy = (z_prio < 0) ? SCHED_FIFO : SCHED_RR;
267 	priority = ZEPHYR_TO_POSIX_PRIORITY(z_prio);
268 	__ASSERT_NO_MSG(is_posix_policy_prio_valid(priority, *policy));
269 
270 	return priority;
271 }
272 
273 /* Non-static so that they can be tested in ztest */
/* Map a POSIX priority/policy pair back to a Zephyr thread priority.
 * Inverse of zephyr_to_posix_priority(); asserts the inputs are in range.
 */
int posix_to_zephyr_priority(int priority, int policy)
{
	__ASSERT_NO_MSG(is_posix_policy_prio_valid(priority, policy));

	return POSIX_TO_ZEPHYR_PRIORITY(priority, policy);
}
280 
/* True iff attr is complete enough to actually start a thread: it has a stack
 * of at least PTHREAD_STACK_MIN bytes and a valid scheduling policy.
 */
static bool __attr_is_runnable(const struct posix_thread_attr *attr)
{
	size_t stacksize;

	if (attr == NULL || attr->stack == NULL) {
		LOG_DBG("attr %p is not initialized", attr);
		return false;
	}

	stacksize = __get_attr_stacksize(attr);
	if (stacksize < PTHREAD_STACK_MIN) {
		LOG_DBG("attr %p has stacksize %zu is smaller than PTHREAD_STACK_MIN (%zu)", attr,
			stacksize, (size_t)PTHREAD_STACK_MIN);
		return false;
	}

	/* require a valid scheduler policy */
	if (!valid_posix_policy(attr->schedpolicy)) {
		LOG_DBG("Invalid scheduler policy %d", attr->schedpolicy);
		return false;
	}

	return true;
}
305 
__attr_is_initialized(const struct posix_thread_attr * attr)306 static bool __attr_is_initialized(const struct posix_thread_attr *attr)
307 {
308 	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD)) {
309 		return __attr_is_runnable(attr);
310 	}
311 
312 	if (attr == NULL || !attr->initialized) {
313 		LOG_DBG("attr %p is not initialized", attr);
314 		return false;
315 	}
316 
317 	return true;
318 }
319 
320 /**
321  * @brief Set scheduling parameter attributes in thread attributes object.
322  *
323  * See IEEE 1003.1
324  */
int pthread_attr_setschedparam(pthread_attr_t *_attr, const struct sched_param *schedparam)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
	/* short-circuit order matters: attr must be valid before schedpolicy is read */
	bool valid = __attr_is_initialized(attr) && (schedparam != NULL) &&
		     is_posix_policy_prio_valid(schedparam->sched_priority, attr->schedpolicy);

	if (!valid) {
		LOG_DBG("Invalid pthread_attr_t or sched_param");
		return EINVAL;
	}

	attr->priority = schedparam->sched_priority;
	return 0;
}
338 
339 /**
340  * @brief Set stack attributes in thread attributes object.
341  *
342  * See IEEE 1003.1
343  */
int pthread_attr_setstack(pthread_attr_t *_attr, void *stackaddr, size_t stacksize)
{
	int ret;
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (stackaddr == NULL) {
		LOG_DBG("NULL stack address");
		return EACCES;
	}

	/* size must fit the attr bitfield and satisfy the POSIX minimum */
	if (!__attr_is_initialized(attr) || stacksize == 0 || stacksize < PTHREAD_STACK_MIN ||
	    stacksize > PTHREAD_STACK_MAX) {
		LOG_DBG("Invalid stacksize %zu", stacksize);
		return EINVAL;
	}

	/* release any stack previously auto-allocated by pthread_attr_init() */
	if (attr->stack != NULL) {
		ret = k_thread_stack_free(attr->stack);
		if (ret == 0) {
			LOG_DBG("Freed attr %p thread stack %zu@%p", _attr,
				__get_attr_stacksize(attr), attr->stack);
		}
	}

	attr->stack = stackaddr;
	__set_attr_stacksize(attr, stacksize);

	LOG_DBG("Assigned thread stack %zu@%p to attr %p", __get_attr_stacksize(attr), attr->stack,
		_attr);

	return 0;
}
376 
377 /**
378  * @brief Get scope attributes in thread attributes object.
379  *
380  * See IEEE 1003.1
381  */
pthread_attr_getscope(const pthread_attr_t * _attr,int * contentionscope)382 int pthread_attr_getscope(const pthread_attr_t *_attr, int *contentionscope)
383 {
384 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
385 
386 	if (!__attr_is_initialized(attr) || contentionscope == NULL) {
387 		return EINVAL;
388 	}
389 	*contentionscope = attr->contentionscope;
390 	return 0;
391 }
392 
393 /**
394  * @brief Set scope attributes in thread attributes object.
395  *
396  * See IEEE 1003.1
397  */
int pthread_attr_setscope(pthread_attr_t *_attr, int contentionscope)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		LOG_DBG("attr %p is not initialized", attr);
		return EINVAL;
	}

	if ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
	    (contentionscope != PTHREAD_SCOPE_SYSTEM)) {
		LOG_DBG("%s contentionscope %d", "Invalid", contentionscope);
		return EINVAL;
	}

	if (contentionscope == PTHREAD_SCOPE_PROCESS) {
		/* Zephyr does not yet support processes or process scheduling */
		LOG_DBG("%s contentionscope %d", "Unsupported", contentionscope);
		return ENOTSUP;
	}

	attr->contentionscope = contentionscope;
	return 0;
}
419 
420 /**
421  * @brief Get inherit scheduler attributes in thread attributes object.
422  *
423  * See IEEE 1003.1
424  */
pthread_attr_getinheritsched(const pthread_attr_t * _attr,int * inheritsched)425 int pthread_attr_getinheritsched(const pthread_attr_t *_attr, int *inheritsched)
426 {
427 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
428 
429 	if (!__attr_is_initialized(attr) || inheritsched == NULL) {
430 		return EINVAL;
431 	}
432 	*inheritsched = attr->inheritsched;
433 	return 0;
434 }
435 
436 /**
437  * @brief Set inherit scheduler attributes in thread attributes object.
438  *
439  * See IEEE 1003.1
440  */
int pthread_attr_setinheritsched(pthread_attr_t *_attr, int inheritsched)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		LOG_DBG("attr %p is not initialized", attr);
		return EINVAL;
	}

	switch (inheritsched) {
	case PTHREAD_INHERIT_SCHED:
	case PTHREAD_EXPLICIT_SCHED:
		attr->inheritsched = inheritsched;
		return 0;
	default:
		LOG_DBG("Invalid inheritsched %d", inheritsched);
		return EINVAL;
	}
}
458 
/* Deferred-work trampoline: reclaims finished pthreads off the system workqueue,
 * scheduled from posix_thread_finalize() after a configurable delay.
 */
static void posix_thread_recycle_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	posix_thread_recycle();
}
static K_WORK_DELAYABLE_DEFINE(posix_thread_recycle_work, posix_thread_recycle_work_handler);
465 
466 extern struct sys_sem pthread_key_lock;
467 
posix_thread_finalize(struct posix_thread * t,void * retval)468 static void posix_thread_finalize(struct posix_thread *t, void *retval)
469 {
470 	sys_snode_t *node_l, *node_s;
471 	pthread_key_obj *key_obj;
472 	pthread_thread_data *thread_spec_data;
473 	sys_snode_t *node_key_data, *node_key_data_s, *node_key_data_prev = NULL;
474 	struct pthread_key_data *key_data;
475 
476 	SYS_SLIST_FOR_EACH_NODE_SAFE(&t->key_list, node_l, node_s) {
477 		thread_spec_data = (pthread_thread_data *)node_l;
478 		if (thread_spec_data != NULL) {
479 			key_obj = thread_spec_data->key;
480 			if (key_obj->destructor != NULL) {
481 				(key_obj->destructor)(thread_spec_data->spec_data);
482 			}
483 
484 			SYS_SEM_LOCK(&pthread_key_lock) {
485 				SYS_SLIST_FOR_EACH_NODE_SAFE(
486 					&key_obj->key_data_l,
487 					node_key_data,
488 					node_key_data_s) {
489 					key_data = (struct pthread_key_data *)node_key_data;
490 					if (&key_data->thread_data == thread_spec_data) {
491 						sys_slist_remove(
492 							&key_obj->key_data_l,
493 							node_key_data_prev,
494 							node_key_data
495 						);
496 						k_free(key_data);
497 						break;
498 					}
499 					node_key_data_prev = node_key_data;
500 				}
501 			}
502 		}
503 	}
504 
505 	/* move thread from run_q to done_q */
506 	SYS_SEM_LOCK(&pthread_pool_lock) {
507 		sys_dlist_remove(&t->q_node);
508 		posix_thread_q_set(t, POSIX_THREAD_DONE_Q);
509 		t->retval = retval;
510 	}
511 
512 	/* trigger recycle work */
513 	(void)k_work_schedule(&posix_thread_recycle_work, K_MSEC(CONFIG_PTHREAD_RECYCLER_DELAY_MS));
514 
515 	/* abort the underlying k_thread */
516 	k_thread_abort(&t->thread);
517 }
518 
/* Entry point for every k_thread backing a pthread: arg1 is the user argument,
 * arg2 the start routine, arg3 the creation barrier handle (when enabled).
 */
FUNC_NORETURN
static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3)
{
	int err;
	/* NOTE(review): holds a pthread_barrier_t handle passed by value through
	 * arg3; assumes pthread_barrier_t is int-compatible - confirm if the
	 * barrier type ever changes.
	 */
	int barrier;
	void *(*fun_ptr)(void *arg) = arg2;
	struct posix_thread *t = CONTAINER_OF(k_current_get(), struct posix_thread, thread);

	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		/* cross the barrier so that pthread_create() can continue */
		barrier = POINTER_TO_UINT(arg3);
		err = pthread_barrier_wait(&barrier);
		__ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
	}

	/* run the start routine, then retire the thread with its return value */
	posix_thread_finalize(t, fun_ptr(arg1));

	CODE_UNREACHABLE;
}
538 
/* Reclaim finished pthreads: detached (or already-joined) threads on the done
 * queue have their attrs cleaned up and are returned to the ready queue for
 * reuse by pthread_create().
 */
static void posix_thread_recycle(void)
{
	struct posix_thread *t;
	struct posix_thread *safe_t;
	sys_dlist_t recyclables = SYS_DLIST_STATIC_INIT(&recyclables);

	/* collect reclaimable threads onto a private list under the lock */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&posix_thread_q[POSIX_THREAD_DONE_Q], t, safe_t,
						  q_node) {
			if (t->attr.detachstate == PTHREAD_CREATE_JOINABLE) {
				/* thread has not been joined yet */
				continue;
			}

			sys_dlist_remove(&t->q_node);
			sys_dlist_append(&recyclables, &t->q_node);
		}
	}

	if (sys_dlist_is_empty(&recyclables)) {
		return;
	}

	LOG_DBG("Recycling %zu threads", sys_dlist_len(&recyclables));

	/* attr teardown happens outside the lock; it may free a dynamic stack */
	SYS_DLIST_FOR_EACH_CONTAINER(&recyclables, t, q_node) {
		if (t->attr.caller_destroys) {
			/* the creator owns the attr; just drop our copy */
			t->attr = (struct posix_thread_attr){0};
		} else {
			(void)pthread_attr_destroy((pthread_attr_t *)&t->attr);
		}
	}

	/* make the slots available to pthread_create() again */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		while (!sys_dlist_is_empty(&recyclables)) {
			t = CONTAINER_OF(sys_dlist_get(&recyclables), struct posix_thread, q_node);
			posix_thread_q_set(t, POSIX_THREAD_READY_Q);
		}
	}
}
579 
580 /**
581  * @brief Create a new thread.
582  *
583  * Pthread attribute should not be NULL. API will return Error on NULL
584  * attribute value.
585  *
586  * See IEEE 1003.1
587  */
int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadroutine)(void *),
		   void *arg)
{
	int err;
	pthread_barrier_t barrier;
	struct posix_thread *t = NULL;

	/* a caller-provided attr must be fully runnable; NULL means use defaults */
	if (!(_attr == NULL || __attr_is_runnable((struct posix_thread_attr *)_attr))) {
		return EINVAL;
	}

	/* reclaim resources greedily */
	posix_thread_recycle();

	/* claim a pooled thread slot: ready_q -> run_q */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		if (!sys_dlist_is_empty(&posix_thread_q[POSIX_THREAD_READY_Q])) {
			t = CONTAINER_OF(sys_dlist_get(&posix_thread_q[POSIX_THREAD_READY_Q]),
					 struct posix_thread, q_node);

			/* initialize thread state */
			posix_thread_q_set(t, POSIX_THREAD_RUN_Q);
			sys_slist_init(&t->key_list);
			sys_slist_init(&t->cleanup_list);
		}
	}

	if (t != NULL && IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		err = pthread_barrier_init(&barrier, NULL, 2);
		if (err != 0) {
			/* cannot allocate barrier. move thread back to ready_q */
			SYS_SEM_LOCK(&pthread_pool_lock) {
				sys_dlist_remove(&t->q_node);
				posix_thread_q_set(t, POSIX_THREAD_READY_Q);
			}
			t = NULL;
		}
	}

	if (t == NULL) {
		/* no threads are ready */
		LOG_DBG("No threads are ready");
		return EAGAIN;
	}

	if (_attr == NULL) {
		/* use defaults; pthread_attr_init() may auto-allocate a stack */
		err = pthread_attr_init((pthread_attr_t *)&t->attr);
		if (err == 0 && !__attr_is_runnable(&t->attr)) {
			(void)pthread_attr_destroy((pthread_attr_t *)&t->attr);
			err = EINVAL;
		}
		if (err != 0) {
			/* cannot allocate pthread attributes (e.g. stack) */
			SYS_SEM_LOCK(&pthread_pool_lock) {
				sys_dlist_remove(&t->q_node);
				posix_thread_q_set(t, POSIX_THREAD_READY_Q);
			}
			return err;
		}
		/* caller not responsible for destroying attr */
		t->attr.caller_destroys = false;
	} else {
		/* copy user-provided attr into thread, caller must destroy attr at a later time */
		t->attr = *(struct posix_thread_attr *)_attr;
	}

	/* PTHREAD_INHERIT_SCHED: take priority/policy from the creating thread */
	if (t->attr.inheritsched == PTHREAD_INHERIT_SCHED) {
		int pol;

		t->attr.priority =
			zephyr_to_posix_priority(k_thread_priority_get(k_current_get()), &pol);
		t->attr.schedpolicy = pol;
	}

	/* spawn the thread; the barrier handle travels by value in the third arg */
	k_thread_create(
		&t->thread, t->attr.stack, __get_attr_stacksize(&t->attr) + t->attr.guardsize,
		zephyr_thread_wrapper, (void *)arg, threadroutine,
		IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER) ? UINT_TO_POINTER(barrier) : NULL,
		posix_to_zephyr_priority(t->attr.priority, t->attr.schedpolicy), 0, K_NO_WAIT);

	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		/* wait for the spawned thread to cross our barrier */
		err = pthread_barrier_wait(&barrier);
		__ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
		err = pthread_barrier_destroy(&barrier);
		__ASSERT_NO_MSG(err == 0);
	}

	/* finally provide the initialized thread to the caller */
	*th = mark_pthread_obj_initialized(posix_thread_to_offset(t));

	LOG_DBG("Created pthread %p", &t->thread);

	return 0;
}
683 
pthread_getconcurrency(void)684 int pthread_getconcurrency(void)
685 {
686 	int ret = 0;
687 
688 	SYS_SEM_LOCK(&pthread_pool_lock) {
689 		ret = pthread_concurrency;
690 	}
691 
692 	return ret;
693 }
694 
/* Record the advisory concurrency level. EINVAL for negative values; EAGAIN
 * when the level exceeds the number of CPUs. The value is informational only.
 */
int pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}

	if (new_level > CONFIG_MP_MAX_NUM_CPUS) {
		return EAGAIN;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		pthread_concurrency = new_level;
	}

	return 0;
}
711 
712 /**
713  * @brief Set cancelability State.
714  *
715  * See IEEE 1003.1
716  */
pthread_setcancelstate(int state,int * oldstate)717 int pthread_setcancelstate(int state, int *oldstate)
718 {
719 	int ret = EINVAL;
720 	bool cancel_pending = false;
721 	struct posix_thread *t = NULL;
722 	bool cancel_type = -1;
723 
724 	if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE) {
725 		LOG_DBG("Invalid pthread state %d", state);
726 		return EINVAL;
727 	}
728 
729 	SYS_SEM_LOCK(&pthread_pool_lock) {
730 		t = to_posix_thread(pthread_self());
731 		if (t == NULL) {
732 			ret = EINVAL;
733 			SYS_SEM_LOCK_BREAK;
734 		}
735 
736 		if (oldstate != NULL) {
737 			*oldstate = t->attr.cancelstate;
738 		}
739 
740 		t->attr.cancelstate = state;
741 		cancel_pending = t->attr.cancelpending;
742 		cancel_type = t->attr.canceltype;
743 
744 		ret = 0;
745 	}
746 
747 	if (ret == 0 && state == PTHREAD_CANCEL_ENABLE &&
748 	    cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS && cancel_pending) {
749 		posix_thread_finalize(t, PTHREAD_CANCELED);
750 	}
751 
752 	return ret;
753 }
754 
755 /**
756  * @brief Set cancelability Type.
757  *
758  * See IEEE 1003.1
759  */
int pthread_setcanceltype(int type, int *oldtype)
{
	int ret = EINVAL;
	struct posix_thread *t;

	if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS) {
		LOG_DBG("Invalid pthread cancel type %d", type);
		return EINVAL;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			ret = EINVAL;
			SYS_SEM_LOCK_BREAK;
		}

		/* report the previous type before overwriting it */
		if (oldtype != NULL) {
			*oldtype = t->attr.canceltype;
		}
		t->attr.canceltype = type;

		ret = 0;
	}

	return ret;
}
787 
788 /**
789  * @brief Create a cancellation point in the calling thread.
790  *
791  * See IEEE 1003.1
792  */
void pthread_testcancel(void)
{
	bool cancel_pended = false;
	struct posix_thread *t = NULL;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			SYS_SEM_LOCK_BREAK;
		}
		if (t->attr.cancelstate != PTHREAD_CANCEL_ENABLE) {
			SYS_SEM_LOCK_BREAK;
		}
		if (t->attr.cancelpending) {
			cancel_pended = true;
			/* disable further cancellation while this thread tears down */
			t->attr.cancelstate = PTHREAD_CANCEL_DISABLE;
		}
	}

	/* finalize outside the lock; posix_thread_finalize() takes it itself */
	if (cancel_pended) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}
}
816 
817 /**
818  * @brief Cancel execution of a thread.
819  *
820  * See IEEE 1003.1
821  */
int pthread_cancel(pthread_t pthread)
{
	int ret = ESRCH;
	/* cancelstate/canceltype are int-valued enum constants; these were
	 * previously declared bool, relying on implicit conversion - use int.
	 */
	int cancel_state = PTHREAD_CANCEL_ENABLE;
	int cancel_type = PTHREAD_CANCEL_DEFERRED;
	struct posix_thread *t = NULL;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		if (!__attr_is_initialized(&t->attr)) {
			/* thread has already terminated */
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/* mark the cancel pending and snapshot state for the check below */
		ret = 0;
		t->attr.cancelpending = true;
		cancel_state = t->attr.cancelstate;
		cancel_type = t->attr.canceltype;
	}

	/* async-cancellable targets are terminated immediately; deferred ones
	 * terminate at their next cancellation point (pthread_testcancel())
	 */
	if (ret == 0 && cancel_state == PTHREAD_CANCEL_ENABLE &&
	    cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}

	return ret;
}
855 
856 /**
857  * @brief Set thread scheduling policy and parameters.
858  *
859  * See IEEE 1003.1
860  */
int pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param)
{
	int ret = ESRCH;
	int new_prio = K_LOWEST_APPLICATION_THREAD_PRIO;
	struct posix_thread *t = NULL;

	if (param == NULL || !valid_posix_policy(policy) ||
	    !is_posix_policy_prio_valid(param->sched_priority, policy)) {
		return EINVAL;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		ret = 0;
		new_prio = posix_to_zephyr_priority(param->sched_priority, policy);
	}

	/* apply outside the lock; k_thread_priority_set() may reschedule */
	if (ret == 0) {
		k_thread_priority_set(&t->thread, new_prio);
	}

	return ret;
}
889 
890 /**
891  * @brief Set thread scheduling priority.
892  *
893  * See IEEE 1003.1
894  */
int pthread_setschedprio(pthread_t thread, int prio)
{
	int ret;
	int new_prio = K_LOWEST_APPLICATION_THREAD_PRIO;
	struct posix_thread *t = NULL;
	int policy = -1;
	struct sched_param param;

	/* fetch the thread's current policy so prio can be range-checked against it */
	ret = pthread_getschedparam(thread, &policy, &param);
	if (ret != 0) {
		return ret;
	}

	if (!is_posix_policy_prio_valid(prio, policy)) {
		return EINVAL;
	}

	ret = ESRCH;
	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(thread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		ret = 0;
		new_prio = posix_to_zephyr_priority(prio, policy);
	}

	/* apply outside the lock; k_thread_priority_set() may reschedule */
	if (ret == 0) {
		k_thread_priority_set(&t->thread, new_prio);
	}

	return ret;
}
930 
931 /**
932  * @brief Initialise threads attribute object
933  *
934  * See IEEE 1003.1
935  */
int pthread_attr_init(pthread_attr_t *_attr)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	if (attr == NULL) {
		LOG_DBG("Invalid attr pointer");
		return ENOMEM;
	}

	/* the auto-allocated stack size must be encodable in the attr bitfield */
	BUILD_ASSERT(DYNAMIC_STACK_SIZE <= PTHREAD_STACK_MAX);

	/* reset to defaults: system scope, inherit-sched, configured guard size */
	*attr = (struct posix_thread_attr){0};
	attr->guardsize = CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT;
	attr->contentionscope = PTHREAD_SCOPE_SYSTEM;
	attr->inheritsched = PTHREAD_INHERIT_SCHED;

	/* with CONFIG_DYNAMIC_THREAD_STACK_SIZE, try to pre-allocate a stack;
	 * failure here is not fatal - pthread_attr_setstack() can still be used
	 */
	if (DYNAMIC_STACK_SIZE > 0) {
		attr->stack = k_thread_stack_alloc(DYNAMIC_STACK_SIZE + attr->guardsize,
						   k_is_user_context() ? K_USER : 0);
		if (attr->stack == NULL) {
			LOG_DBG("Did not auto-allocate thread stack");
		} else {
			__set_attr_stacksize(attr, DYNAMIC_STACK_SIZE);
			__ASSERT_NO_MSG(__attr_is_initialized(attr));
			LOG_DBG("Allocated thread stack %zu@%p", __get_attr_stacksize(attr),
				attr->stack);
		}
	}

	/* caller responsible for destroying attr */
	attr->initialized = true;

	LOG_DBG("Initialized attr %p", _attr);

	return 0;
}
972 
973 /**
974  * @brief Get thread scheduling policy and parameters
975  *
976  * See IEEE 1003.1
977  */
int pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
{
	int ret = ESRCH;
	struct posix_thread *t;

	if (policy == NULL || param == NULL) {
		return EINVAL;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		if (!__attr_is_initialized(&t->attr)) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/* derive priority and policy from the live Zephyr thread priority */
		ret = 0;
		param->sched_priority =
			zephyr_to_posix_priority(k_thread_priority_get(&t->thread), policy);
	}

	return ret;
}
1006 
1007 /**
1008  * @brief Dynamic package initialization
1009  *
1010  * See IEEE 1003.1
1011  */
int pthread_once(pthread_once_t *once, void (*init_func)(void))
{
	int ret = EINVAL;
	bool run_init_func = false;
	struct pthread_once *const _once = (struct pthread_once *)once;

	if (init_func == NULL) {
		return EINVAL;
	}

	/* the flag is tested-and-set under the shared pool lock; only the first
	 * caller observes it clear and becomes responsible for running init_func
	 */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		if (!_once->flag) {
			run_init_func = true;
			_once->flag = true;
		}
		ret = 0;
	}

	/* run outside the lock so init_func may itself use pthread APIs */
	if (ret == 0 && run_init_func) {
		init_func();
	}

	return ret;
}
1036 
1037 /**
1038  * @brief Terminate calling thread.
1039  *
1040  * See IEEE 1003.1
1041  */
FUNC_NORETURN
void pthread_exit(void *retval)
{
	struct posix_thread *self = NULL;

	SYS_SEM_LOCK(&pthread_pool_lock) {
		self = to_posix_thread(pthread_self());
		if (self == NULL) {
			SYS_SEM_LOCK_BREAK;
		}

		/* Mark a thread as cancellable before exiting */
		self->attr.cancelstate = PTHREAD_CANCEL_ENABLE;
	}

	if (self == NULL) {
		/* not a valid posix_thread - abort the raw k_thread instead */
		LOG_DBG("Aborting non-pthread %p", k_current_get());
		k_thread_abort(k_current_get());

		CODE_UNREACHABLE;
	}

	/* publishes retval, moves to done_q, and aborts the underlying k_thread */
	posix_thread_finalize(self, retval);
	CODE_UNREACHABLE;
}
1068 
/*
 * Common backend for pthread_join(), pthread_tryjoin_np(), and
 * pthread_timedjoin_np(): join @p pthread with the given Zephyr timeout.
 *
 * Returns 0 on success, EDEADLK on self-join, ESRCH for an invalid or
 * already-joined thread, EINVAL for a non-joinable thread, EBUSY if the
 * thread is still running (K_NO_WAIT), or ETIMEDOUT on timeout.
 */
static int pthread_timedjoin_internal(pthread_t pthread, void **status, k_timeout_t timeout)
{
	int ret = ESRCH;
	struct posix_thread *t = NULL;

	if (pthread == pthread_self()) {
		LOG_DBG("Pthread attempted to join itself (%x)", pthread);
		return EDEADLK;
	}

	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		LOG_DBG("Pthread %p joining..", &t->thread);

		if (t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
			/* undefined behaviour */
			ret = EINVAL;
			SYS_SEM_LOCK_BREAK;
		}

		/* READY_Q means the slot was never started or was already reclaimed */
		if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/*
		 * thread is joinable and is in run_q or done_q.
		 * let's ensure that the thread cannot be joined again after this point.
		 */
		ret = 0;
		t->attr.detachstate = PTHREAD_CREATE_DETACHED;
	}

	switch (ret) {
	case ESRCH:
		/* NOTE(review): t may be NULL on this path (lookup failure above);
		 * `&t->thread` is then an offset from NULL — harmless as a log arg
		 * in practice, but formally UB. Verify upstream intent.
		 */
		LOG_DBG("Pthread %p has already been joined", &t->thread);
		return ret;
	case EINVAL:
		LOG_DBG("Pthread %p is not a joinable", &t->thread);
		return ret;
	case 0:
		break;
	}

	/* block (outside the lock) until the thread terminates or the timeout expires */
	ret = k_thread_join(&t->thread, timeout);
	if (ret != 0) {
		/* when joining failed, ensure that the thread can be joined later */
		SYS_SEM_LOCK(&pthread_pool_lock) {
			t->attr.detachstate = PTHREAD_CREATE_JOINABLE;
		}
	}
	if (ret == -EBUSY) {
		return EBUSY;
	} else if (ret == -EAGAIN) {
		return ETIMEDOUT;
	}
	/* Can only be ok or -EDEADLK, which should never occur for pthreads */
	__ASSERT_NO_MSG(ret == 0);

	LOG_DBG("Joined pthread %p", &t->thread);

	if (status != NULL) {
		LOG_DBG("Writing status to %p", status);
		*status = t->retval;
	}

	/* return finished-but-unreclaimed thread slots to the ready queue */
	posix_thread_recycle();

	return 0;
}
1144 
1145 /**
1146  * @brief Await a thread termination with timeout.
1147  *
1148  * Non-portable GNU extension of IEEE 1003.1
1149  */
/**
 * @brief Await a thread termination with timeout.
 *
 * Non-portable GNU extension of IEEE 1003.1
 */
int pthread_timedjoin_np(pthread_t pthread, void **status, const struct timespec *abstime)
{
	/* reject a missing or malformed absolute timestamp up front */
	if ((abstime == NULL) ||
	    (abstime->tv_sec < 0) ||
	    (abstime->tv_nsec < 0) ||
	    (abstime->tv_nsec >= NSEC_PER_SEC)) {
		return EINVAL;
	}

	/* convert the absolute deadline to a relative Zephyr timeout and join */
	return pthread_timedjoin_internal(pthread, status, K_MSEC(timespec_to_timeoutms(abstime)));
}
1162 
1163 /**
1164  * @brief Check a thread for termination.
1165  *
1166  * Non-portable GNU extension of IEEE 1003.1
1167  */
/**
 * @brief Check a thread for termination.
 *
 * Non-portable GNU extension of IEEE 1003.1
 */
int pthread_tryjoin_np(pthread_t pthread, void **status)
{
	/* poll-only join: never block, report EBUSY if still running */
	const k_timeout_t no_wait = K_NO_WAIT;

	return pthread_timedjoin_internal(pthread, status, no_wait);
}
1172 
1173 /**
1174  * @brief Await a thread termination.
1175  *
1176  * See IEEE 1003.1
1177  */
/**
 * @brief Await a thread termination.
 *
 * See IEEE 1003.1
 */
int pthread_join(pthread_t pthread, void **status)
{
	/* block indefinitely until the target terminates */
	const k_timeout_t forever = K_FOREVER;

	return pthread_timedjoin_internal(pthread, status, forever);
}
1182 
1183 /**
1184  * @brief Detach a thread.
1185  *
1186  * See IEEE 1003.1
1187  */
int pthread_detach(pthread_t pthread)
{
	/* default: target thread not found */
	int ret = ESRCH;
	struct posix_thread *t = NULL;

	/* update detachstate under the pool lock to race-free against join/exit */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/* cannot detach an unstarted/reclaimed slot or an already-detached thread */
		if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q ||
		    t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
			LOG_DBG("Pthread %p cannot be detached", &t->thread);
			ret = EINVAL;
			SYS_SEM_LOCK_BREAK;
		}

		ret = 0;
		t->attr.detachstate = PTHREAD_CREATE_DETACHED;
	}

	if (ret == 0) {
		LOG_DBG("Pthread %p detached", &t->thread);
	}

	return ret;
}
1217 
1218 /**
1219  * @brief Get detach state attribute in thread attributes object.
1220  *
1221  * See IEEE 1003.1
1222  */
pthread_attr_getdetachstate(const pthread_attr_t * _attr,int * detachstate)1223 int pthread_attr_getdetachstate(const pthread_attr_t *_attr, int *detachstate)
1224 {
1225 	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1226 
1227 	if (!__attr_is_initialized(attr) || (detachstate == NULL)) {
1228 		return EINVAL;
1229 	}
1230 
1231 	*detachstate = attr->detachstate;
1232 	return 0;
1233 }
1234 
1235 /**
1236  * @brief Set detach state attribute in thread attributes object.
1237  *
1238  * See IEEE 1003.1
1239  */
/**
 * @brief Set detach state attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setdetachstate(pthread_attr_t *_attr, int detachstate)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		return EINVAL;
	}

	/* only the two states defined by POSIX are accepted */
	if ((detachstate != PTHREAD_CREATE_JOINABLE) && (detachstate != PTHREAD_CREATE_DETACHED)) {
		return EINVAL;
	}

	attr->detachstate = detachstate;

	return 0;
}
1252 
1253 /**
1254  * @brief Get scheduling policy attribute in Thread attributes.
1255  *
1256  * See IEEE 1003.1
1257  */
pthread_attr_getschedpolicy(const pthread_attr_t * _attr,int * policy)1258 int pthread_attr_getschedpolicy(const pthread_attr_t *_attr, int *policy)
1259 {
1260 	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1261 
1262 	if (!__attr_is_initialized(attr) || (policy == NULL)) {
1263 		return EINVAL;
1264 	}
1265 
1266 	*policy = attr->schedpolicy;
1267 	return 0;
1268 }
1269 
1270 /**
1271  * @brief Set scheduling policy attribute in Thread attributes object.
1272  *
1273  * See IEEE 1003.1
1274  */
/**
 * @brief Set scheduling policy attribute in Thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setschedpolicy(pthread_attr_t *_attr, int policy)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		return EINVAL;
	}

	/* policy must be one of SCHED_FIFO / SCHED_RR / etc. accepted by this port */
	if (!valid_posix_policy(policy)) {
		return EINVAL;
	}

	attr->schedpolicy = policy;

	return 0;
}
1286 
1287 /**
1288  * @brief Get stack size attribute in thread attributes object.
1289  *
1290  * See IEEE 1003.1
1291  */
/**
 * @brief Get stack size attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getstacksize(const pthread_attr_t *_attr, size_t *stacksize)
{
	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;

	/* both the attr object and the output pointer must be valid */
	if ((stacksize == NULL) || !__attr_is_initialized(attr)) {
		return EINVAL;
	}

	/* stacksize is stored biased; the accessor undoes the bias */
	*stacksize = __get_attr_stacksize(attr);

	return 0;
}
1303 
1304 /**
1305  * @brief Set stack size attribute in thread attributes object.
1306  *
1307  * See IEEE 1003.1
1308  */
int pthread_attr_setstacksize(pthread_attr_t *_attr, size_t stacksize)
{
	int ret;
	void *new_stack;
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	/* reject sizes outside [PTHREAD_STACK_MIN, PTHREAD_STACK_MAX] */
	if (!__attr_is_initialized(attr) || stacksize == 0 || stacksize < PTHREAD_STACK_MIN ||
	    stacksize > PTHREAD_STACK_MAX) {
		return EINVAL;
	}

	/* no-op if the requested size is already in effect */
	if (__get_attr_stacksize(attr) == stacksize) {
		return 0;
	}

	/* allocate the replacement stack before touching attr, so failure leaves
	 * the attr object unchanged; guard area is carved from the same allocation
	 */
	new_stack =
		k_thread_stack_alloc(stacksize + attr->guardsize, k_is_user_context() ? K_USER : 0);
	if (new_stack == NULL) {
		/* shrinking can succeed without a new allocation: just record
		 * the smaller size against the existing (larger) stack
		 */
		if (stacksize < __get_attr_stacksize(attr)) {
			__set_attr_stacksize(attr, stacksize);
			return 0;
		}

		LOG_DBG("k_thread_stack_alloc(%zu) failed",
			__get_attr_stacksize(attr) + attr->guardsize);
		return ENOMEM;
	}
	LOG_DBG("Allocated thread stack %zu@%p", stacksize + attr->guardsize, new_stack);

	/* release the previous stack (if any) now that the new one is secured */
	if (attr->stack != NULL) {
		ret = k_thread_stack_free(attr->stack);
		if (ret == 0) {
			LOG_DBG("Freed attr %p thread stack %zu@%p", _attr,
				__get_attr_stacksize(attr), attr->stack);
		}
	}

	__set_attr_stacksize(attr, stacksize);
	attr->stack = new_stack;

	return 0;
}
1351 
1352 /**
1353  * @brief Get stack attributes in thread attributes object.
1354  *
1355  * See IEEE 1003.1
1356  */
/**
 * @brief Get stack attributes in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getstack(const pthread_attr_t *_attr, void **stackaddr, size_t *stacksize)
{
	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;

	/* the attr object and both output pointers must be valid */
	if ((stackaddr == NULL) || (stacksize == NULL) || !__attr_is_initialized(attr)) {
		return EINVAL;
	}

	*stackaddr = attr->stack;
	*stacksize = __get_attr_stacksize(attr);

	return 0;
}
1369 
/**
 * @brief Get the guard size attribute in a thread attributes object.
 *
 * See IEEE 1003.1
 *
 * @param _attr initialized thread attributes object
 * @param guardsize output for the configured guard size, in bytes
 * @return 0 on success, EINVAL for an uninitialized attr or NULL output
 */
int pthread_attr_getguardsize(const pthread_attr_t *ZRESTRICT _attr, size_t *ZRESTRICT guardsize)
{
	/* read-only accessor: keep the const qualifier instead of casting it away,
	 * matching the other attr getters in this file
	 */
	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || guardsize == NULL) {
		return EINVAL;
	}

	*guardsize = attr->guardsize;

	return 0;
}
1382 
/**
 * @brief Set the guard size attribute in a thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setguardsize(pthread_attr_t *_attr, size_t guardsize)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		return EINVAL;
	}

	/* guardsize is stored in a bitfield; reject values that would not fit */
	if (guardsize > PTHREAD_GUARD_MAX) {
		return EINVAL;
	}

	attr->guardsize = guardsize;

	return 0;
}
1395 
1396 /**
1397  * @brief Get thread attributes object scheduling parameters.
1398  *
1399  * See IEEE 1003.1
1400  */
pthread_attr_getschedparam(const pthread_attr_t * _attr,struct sched_param * schedparam)1401 int pthread_attr_getschedparam(const pthread_attr_t *_attr, struct sched_param *schedparam)
1402 {
1403 	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1404 
1405 	if (!__attr_is_initialized(attr) || (schedparam == NULL)) {
1406 		return EINVAL;
1407 	}
1408 
1409 	schedparam->sched_priority = attr->priority;
1410 	return 0;
1411 }
1412 
1413 /**
1414  * @brief Destroy thread attributes object.
1415  *
1416  * See IEEE 1003.1
1417  */
/**
 * @brief Destroy thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_destroy(pthread_attr_t *_attr)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		return EINVAL;
	}

	/* release any dynamically allocated stack owned by this attr */
	int rc = k_thread_stack_free(attr->stack);

	if (rc == 0) {
		LOG_DBG("Freed attr %p thread stack %zu@%p", _attr, __get_attr_stacksize(attr),
			attr->stack);
	}

	/* zero the whole object so it reads as uninitialized from now on */
	*attr = (struct posix_thread_attr){0};
	LOG_DBG("Destroyed attr %p", _attr);

	return 0;
}
1438 
/**
 * @brief Set the name of a POSIX thread.
 *
 * Non-portable GNU extension of IEEE 1003.1; no-op unless
 * CONFIG_THREAD_NAME is enabled.
 */
int pthread_setname_np(pthread_t thread, const char *name)
{
#ifdef CONFIG_THREAD_NAME
	pthread_t idx = get_posix_thread_idx(thread);

	if (idx >= ARRAY_SIZE(posix_thread_pool)) {
		return ESRCH;
	}

	if (name == NULL) {
		return EINVAL;
	}

	/* delegate to the kernel's thread-name facility */
	return k_thread_name_set(&posix_thread_pool[idx].thread, name);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(name);
	return 0;
#endif
}
1462 
/**
 * @brief Get the name of a POSIX thread.
 *
 * Non-portable GNU extension of IEEE 1003.1; no-op unless
 * CONFIG_THREAD_NAME is enabled.
 *
 * @param thread thread whose name to query
 * @param name output buffer; always NUL-terminated on success
 * @param len size of @p name in bytes; must be non-zero
 * @return 0 on success, ESRCH for an invalid thread, EINVAL for a NULL or
 *         zero-length buffer, or the result of k_thread_name_copy()
 */
int pthread_getname_np(pthread_t thread, char *name, size_t len)
{
#ifdef CONFIG_THREAD_NAME
	k_tid_t kthread;

	thread = get_posix_thread_idx(thread);
	if (thread >= ARRAY_SIZE(posix_thread_pool)) {
		return ESRCH;
	}

	/* len == 0 must be rejected: `len - 1` below would wrap to SIZE_MAX and
	 * k_thread_name_copy() would be told the buffer is effectively unbounded
	 */
	if (name == NULL || len == 0) {
		return EINVAL;
	}

	/* pre-clear so the result is NUL-terminated even on a truncated copy */
	memset(name, '\0', len);
	kthread = &posix_thread_pool[thread].thread;
	return k_thread_name_copy(kthread, name, len - 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(name);
	ARG_UNUSED(len);
	return 0;
#endif
}
1487 
pthread_atfork(void (* prepare)(void),void (* parent)(void),void (* child)(void))1488 int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
1489 {
1490 	ARG_UNUSED(prepare);
1491 	ARG_UNUSED(parent);
1492 	ARG_UNUSED(child);
1493 
1494 	return ENOSYS;
1495 }
1496 
1497 /* this should probably go into signal.c but we need access to the lock */
/* this should probably go into signal.c but we need access to the lock */
int pthread_sigmask(int how, const sigset_t *ZRESTRICT set, sigset_t *ZRESTRICT oset)
{
	/* default: calling thread is not a managed pthread */
	int ret = ESRCH;
	struct posix_thread *t = NULL;

	/* only the three POSIX-defined operations are accepted */
	if (!(how == SIG_BLOCK || how == SIG_SETMASK || how == SIG_UNBLOCK)) {
		return EINVAL;
	}

	/* mutate the per-thread sigset under the pool lock */
	SYS_SEM_LOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			ret = ESRCH;
			SYS_SEM_LOCK_BREAK;
		}

		/* report the previous mask before applying any change */
		if (oset != NULL) {
			*oset = t->sigset;
		}

		/* a NULL set means "query only": succeed without modification */
		ret = 0;
		if (set == NULL) {
			SYS_SEM_LOCK_BREAK;
		}

		switch (how) {
		case SIG_BLOCK:
			/* union: add the requested signals to the blocked set */
			for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
				t->sigset.sig[i] |= set->sig[i];
			}
			break;
		case SIG_SETMASK:
			/* wholesale replacement of the blocked set */
			t->sigset = *set;
			break;
		case SIG_UNBLOCK:
			/* difference: clear the requested signals from the blocked set */
			for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
				t->sigset.sig[i] &= ~set->sig[i];
			}
			break;
		}
	}

	return ret;
}
1542 
1543 __boot_func
posix_thread_pool_init(void)1544 static int posix_thread_pool_init(void)
1545 {
1546 	ARRAY_FOR_EACH_PTR(posix_thread_pool, th) {
1547 		posix_thread_q_set(th, POSIX_THREAD_READY_Q);
1548 	}
1549 
1550 	return 0;
1551 }
1552 SYS_INIT(posix_thread_pool_init, PRE_KERNEL_1, 0);
1553