1 /*
2 * Copyright (c) 2018 Intel Corporation
3 * Copyright (c) 2023 Meta
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include "posix_internal.h"
9 #include "pthread_sched.h"
10
11 #include <stdio.h>
12
13 #include <zephyr/init.h>
14 #include <zephyr/kernel.h>
15 #include <zephyr/logging/log.h>
16 #include <zephyr/sys/atomic.h>
17 #include <zephyr/posix/pthread.h>
18 #include <zephyr/posix/unistd.h>
19 #include <zephyr/sys/slist.h>
20 #include <zephyr/sys/util.h>
21
/*
 * Map a Zephyr thread priority to a POSIX priority. Negative (cooperative)
 * Zephyr priorities map into the SCHED_FIFO range, non-negative
 * (preemptible) ones into the SCHED_RR range; in both, a larger POSIX value
 * means higher priority.
 */
#define ZEPHYR_TO_POSIX_PRIORITY(_zprio) \
	(((_zprio) < 0) ? (-1 * ((_zprio) + 1)) : (CONFIG_NUM_PREEMPT_PRIORITIES - (_zprio)-1))

/*
 * Inverse of ZEPHYR_TO_POSIX_PRIORITY: SCHED_FIFO maps to the cooperative
 * (negative) Zephyr range, all other policies to the preemptible range.
 */
#define POSIX_TO_ZEPHYR_PRIORITY(_prio, _pol) \
	(((_pol) == SCHED_FIFO) ? (-1 * ((_prio) + 1)) \
				: (CONFIG_NUM_PREEMPT_PRIORITIES - (_prio)-1))

/* Note: DEFAULT_PTHREAD_POLICY is referenced before its #define below; this
 * is fine because macro expansion happens at the point of use.
 */
#define DEFAULT_PTHREAD_PRIORITY \
	POSIX_TO_ZEPHYR_PRIORITY(K_LOWEST_APPLICATION_THREAD_PRIO, DEFAULT_PTHREAD_POLICY)
#define DEFAULT_PTHREAD_POLICY (IS_ENABLED(CONFIG_PREEMPT_ENABLED) ? SCHED_RR : SCHED_FIFO)

/* Upper bounds representable by the stacksize/guardsize bit-fields in
 * struct posix_thread_attr (stacksize is stored biased by -1, guardsize is not).
 */
#define PTHREAD_STACK_MAX BIT(CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS)
#define PTHREAD_GUARD_MAX BIT_MASK(CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_BITS)

LOG_MODULE_REGISTER(pthread, CONFIG_PTHREAD_LOG_LEVEL);

/* size of the automatically-allocated default stack, 0 when disabled */
#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#define DYNAMIC_STACK_SIZE CONFIG_DYNAMIC_THREAD_STACK_SIZE
#else
#define DYNAMIC_STACK_SIZE 0
#endif
43
__get_attr_stacksize(const struct posix_thread_attr * attr)44 static inline size_t __get_attr_stacksize(const struct posix_thread_attr *attr)
45 {
46 return attr->stacksize + 1;
47 }
48
__set_attr_stacksize(struct posix_thread_attr * attr,size_t stacksize)49 static inline void __set_attr_stacksize(struct posix_thread_attr *attr, size_t stacksize)
50 {
51 attr->stacksize = stacksize - 1;
52 }
53
/* One pthread_cleanup_push() handler. Instances alias the caller-provided
 * void *cleanup[3] storage, so this struct must remain exactly three
 * pointers wide (enforced by a BUILD_ASSERT in __z_pthread_cleanup_push()).
 */
struct __pthread_cleanup {
	void (*routine)(void *arg); /* handler run on pop or cancellation */
	void *arg;                  /* argument passed to routine */
	sys_snode_t node;           /* link in posix_thread cleanup_list */
};
59
/* Lifecycle queues a posix_thread pool entry can be on (see posix_thread_q[]) */
enum posix_thread_qid {
	/* ready to be started via pthread_create() */
	POSIX_THREAD_READY_Q,
	/* running */
	POSIX_THREAD_RUN_Q,
	/* exited (either joinable or detached) */
	POSIX_THREAD_DONE_Q,
	/* invalid - sentinel only, has no backing dlist */
	POSIX_THREAD_INVALID_Q,
};
70
/* only 2 bits in struct posix_thread_attr for schedpolicy */
BUILD_ASSERT(SCHED_OTHER < BIT(2) && SCHED_FIFO < BIT(2) && SCHED_RR < BIT(2));

/* detachstate and cancelstate live in 1-bit fields, so each pair of POSIX
 * constants must be exactly {0, 1} (in either order)
 */
BUILD_ASSERT((PTHREAD_CREATE_DETACHED == 0 || PTHREAD_CREATE_JOINABLE == 0) &&
	     (PTHREAD_CREATE_DETACHED == 1 || PTHREAD_CREATE_JOINABLE == 1));

BUILD_ASSERT((PTHREAD_CANCEL_ENABLE == 0 || PTHREAD_CANCEL_DISABLE == 0) &&
	     (PTHREAD_CANCEL_ENABLE == 1 || PTHREAD_CANCEL_DISABLE == 1));

/* stacksize and guardsize bit-fields must pack into a single 32-bit word */
BUILD_ASSERT(CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS + CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_BITS <=
	     32);

static void posix_thread_recycle(void);
/* per-state thread queues, indexed by enum posix_thread_qid */
static sys_dlist_t posix_thread_q[] = {
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_READY_Q]),
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_RUN_Q]),
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_DONE_Q]),
};
/* statically-allocated pool backing every pthread_t this module hands out */
static struct posix_thread posix_thread_pool[CONFIG_POSIX_THREAD_THREADS_MAX];
/* guards posix_thread_q, pool entry state, and pthread_concurrency */
static struct k_spinlock pthread_pool_lock;
static int pthread_concurrency;
92
posix_thread_q_set(struct posix_thread * t,enum posix_thread_qid qid)93 static inline void posix_thread_q_set(struct posix_thread *t, enum posix_thread_qid qid)
94 {
95 switch (qid) {
96 case POSIX_THREAD_READY_Q:
97 case POSIX_THREAD_RUN_Q:
98 case POSIX_THREAD_DONE_Q:
99 sys_dlist_append(&posix_thread_q[qid], &t->q_node);
100 t->qid = qid;
101 break;
102 default:
103 __ASSERT(false, "cannot set invalid qid %d for posix thread %p", qid, t);
104 break;
105 }
106 }
107
posix_thread_q_get(struct posix_thread * t)108 static inline enum posix_thread_qid posix_thread_q_get(struct posix_thread *t)
109 {
110 switch (t->qid) {
111 case POSIX_THREAD_READY_Q:
112 case POSIX_THREAD_RUN_Q:
113 case POSIX_THREAD_DONE_Q:
114 return t->qid;
115 default:
116 __ASSERT(false, "posix thread %p has invalid qid: %d", t, t->qid);
117 return POSIX_THREAD_INVALID_Q;
118 }
119 }
120
121 /*
122 * We reserve the MSB to mark a pthread_t as initialized (from the
123 * perspective of the application). With a linear space, this means that
124 * the theoretical pthread_t range is [0,2147483647].
125 */
126 BUILD_ASSERT(CONFIG_POSIX_THREAD_THREADS_MAX < PTHREAD_OBJ_MASK_INIT,
127 "CONFIG_POSIX_THREAD_THREADS_MAX is too high");
128
posix_thread_to_offset(struct posix_thread * t)129 static inline size_t posix_thread_to_offset(struct posix_thread *t)
130 {
131 return t - posix_thread_pool;
132 }
133
static inline size_t get_posix_thread_idx(pthread_t pth)
{
	/* strip the "initialized" marker bit, leaving the pool index */
	size_t idx = mark_pthread_obj_uninitialized(pth);

	return idx;
}
138
/*
 * Translate an application-visible pthread_t into its pool entry, or NULL if
 * the handle does not refer to a live thread.
 */
struct posix_thread *to_posix_thread(pthread_t pthread)
{
	struct posix_thread *t;
	size_t idx = get_posix_thread_idx(pthread);

	/* handles that were never marked initialized are invalid */
	if (!is_pthread_obj_initialized(pthread)) {
		LOG_DBG("pthread is not initialized (%x)", pthread);
		return NULL;
	}

	/* the encoded index must land inside the static pool */
	if (idx >= ARRAY_SIZE(posix_thread_pool)) {
		LOG_DBG("Invalid pthread (%x)", pthread);
		return NULL;
	}

	t = &posix_thread_pool[idx];

	/*
	 * A pool entry only backs a live pthread when it is off the ready
	 * queue and, if already done, still joinable. Unlike other posix
	 * objects (which track allocation in a bitarray), threads encode
	 * "allocated" in their queue membership.
	 */
	if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q ||
	    (posix_thread_q_get(t) == POSIX_THREAD_DONE_Q &&
	     t->attr.detachstate == PTHREAD_CREATE_DETACHED)) {
		LOG_DBG("Pthread claims to be initialized (%x)", pthread);
		return NULL;
	}

	return t;
}
174
pthread_self(void)175 pthread_t pthread_self(void)
176 {
177 size_t bit;
178 struct posix_thread *t;
179
180 t = (struct posix_thread *)CONTAINER_OF(k_current_get(), struct posix_thread, thread);
181 bit = posix_thread_to_offset(t);
182
183 return mark_pthread_obj_initialized(bit);
184 }
185
int pthread_equal(pthread_t pt1, pthread_t pt2)
{
	/* handles are plain integers; handle equality is thread identity */
	return (pt1 == pt2) ? 1 : 0;
}
190
__z_pthread_cleanup_init(struct __pthread_cleanup * c,void (* routine)(void * arg),void * arg)191 static inline void __z_pthread_cleanup_init(struct __pthread_cleanup *c, void (*routine)(void *arg),
192 void *arg)
193 {
194 *c = (struct __pthread_cleanup){
195 .routine = routine,
196 .arg = arg,
197 .node = {0},
198 };
199 }
200
/* Backend for pthread_cleanup_push(): records a cleanup handler for the
 * calling thread. @p cleanup is caller-owned storage reinterpreted as a
 * struct __pthread_cleanup; the BUILD_ASSERT proves the sizes match.
 */
void __z_pthread_cleanup_push(void *cleanup[3], void (*routine)(void *arg), void *arg)
{
	struct posix_thread *t = NULL;
	struct __pthread_cleanup *const c = (struct __pthread_cleanup *)cleanup;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		BUILD_ASSERT(3 * sizeof(void *) == sizeof(*c));
		__ASSERT_NO_MSG(t != NULL);
		__ASSERT_NO_MSG(c != NULL);
		__ASSERT_NO_MSG(routine != NULL);
		__z_pthread_cleanup_init(c, routine, arg);
		/* prepend: handlers pop in LIFO order, as POSIX requires */
		sys_slist_prepend(&t->cleanup_list, &c->node);
	}
}
216
/* Backend for pthread_cleanup_pop(): unlinks the most recently pushed
 * handler and, if @p execute is non-zero, runs it.
 */
void __z_pthread_cleanup_pop(int execute)
{
	sys_snode_t *node;
	struct __pthread_cleanup *c = NULL;
	struct posix_thread *t = NULL;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		__ASSERT_NO_MSG(t != NULL);
		/* detach the head (most recently pushed) handler */
		node = sys_slist_get(&t->cleanup_list);
		__ASSERT_NO_MSG(node != NULL);
		c = CONTAINER_OF(node, struct __pthread_cleanup, node);
		__ASSERT_NO_MSG(c != NULL);
		__ASSERT_NO_MSG(c->routine != NULL);
	}
	/* run the handler outside the spinlock so it may block or re-push */
	if (execute) {
		c->routine(c->arg);
	}
}
236
static bool is_posix_policy_prio_valid(int priority, int policy)
{
	/* true iff priority lies in [min, max] for the given policy */
	bool valid = (priority >= posix_sched_priority_min(policy)) &&
		     (priority <= posix_sched_priority_max(policy));

	if (!valid) {
		LOG_DBG("Invalid priority %d and / or policy %d", priority, policy);
	}

	return valid;
}
248
249 /* Non-static so that they can be tested in ztest */
zephyr_to_posix_priority(int z_prio,int * policy)250 int zephyr_to_posix_priority(int z_prio, int *policy)
251 {
252 int priority;
253
254 if (z_prio < 0) {
255 __ASSERT_NO_MSG(-z_prio <= CONFIG_NUM_COOP_PRIORITIES);
256 } else {
257 __ASSERT_NO_MSG(z_prio < CONFIG_NUM_PREEMPT_PRIORITIES);
258 }
259
260 *policy = (z_prio < 0) ? SCHED_FIFO : SCHED_RR;
261 priority = ZEPHYR_TO_POSIX_PRIORITY(z_prio);
262 __ASSERT_NO_MSG(is_posix_policy_prio_valid(priority, *policy));
263
264 return priority;
265 }
266
267 /* Non-static so that they can be tested in ztest */
/* Inverse mapping of zephyr_to_posix_priority(); inputs must already be a
 * valid (priority, policy) pair.
 */
int posix_to_zephyr_priority(int priority, int policy)
{
	__ASSERT_NO_MSG(is_posix_policy_prio_valid(priority, policy));

	return POSIX_TO_ZEPHYR_PRIORITY(priority, policy);
}
274
__attr_is_runnable(const struct posix_thread_attr * attr)275 static bool __attr_is_runnable(const struct posix_thread_attr *attr)
276 {
277 size_t stacksize;
278
279 if (attr == NULL || attr->stack == NULL) {
280 LOG_DBG("attr %p is not initialized", attr);
281 return false;
282 }
283
284 stacksize = __get_attr_stacksize(attr);
285 if (stacksize < PTHREAD_STACK_MIN) {
286 LOG_DBG("attr %p has stacksize %zu is smaller than PTHREAD_STACK_MIN (%zu)", attr,
287 stacksize, (size_t)PTHREAD_STACK_MIN);
288 return false;
289 }
290
291 /* require a valid scheduler policy */
292 if (!valid_posix_policy(attr->schedpolicy)) {
293 LOG_DBG("Invalid scheduler policy %d", attr->schedpolicy);
294 return false;
295 }
296
297 return true;
298 }
299
__attr_is_initialized(const struct posix_thread_attr * attr)300 static bool __attr_is_initialized(const struct posix_thread_attr *attr)
301 {
302 if (IS_ENABLED(CONFIG_DYNAMIC_THREAD)) {
303 return __attr_is_runnable(attr);
304 }
305
306 if (attr == NULL || !attr->initialized) {
307 LOG_DBG("attr %p is not initialized", attr);
308 return false;
309 }
310
311 return true;
312 }
313
314 /**
315 * @brief Set scheduling parameter attributes in thread attributes object.
316 *
317 * See IEEE 1003.1
318 */
int pthread_attr_setschedparam(pthread_attr_t *_attr, const struct sched_param *schedparam)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
	/* order matters: schedpolicy is only read once attr is known valid */
	bool valid = __attr_is_initialized(attr) && schedparam != NULL &&
		     is_posix_policy_prio_valid(schedparam->sched_priority, attr->schedpolicy);

	if (!valid) {
		LOG_DBG("Invalid pthread_attr_t or sched_param");
		return EINVAL;
	}

	attr->priority = schedparam->sched_priority;

	return 0;
}
332
333 /**
334 * @brief Set stack attributes in thread attributes object.
335 *
336 * See IEEE 1003.1
337 */
int pthread_attr_setstack(pthread_attr_t *_attr, void *stackaddr, size_t stacksize)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (stackaddr == NULL) {
		LOG_DBG("NULL stack address");
		return EACCES;
	}

	if (!__attr_is_initialized(attr) || stacksize == 0 || stacksize < PTHREAD_STACK_MIN ||
	    stacksize > PTHREAD_STACK_MAX) {
		LOG_DBG("Invalid stacksize %zu", stacksize);
		return EINVAL;
	}

	/* release any stack previously attached to this attr */
	if (attr->stack != NULL) {
		int ret = k_thread_stack_free(attr->stack);

		if (ret == 0) {
			LOG_DBG("Freed attr %p thread stack %zu@%p", _attr,
				__get_attr_stacksize(attr), attr->stack);
		}
	}

	attr->stack = stackaddr;
	__set_attr_stacksize(attr, stacksize);

	LOG_DBG("Assigned thread stack %zu@%p to attr %p", __get_attr_stacksize(attr), attr->stack,
		_attr);

	return 0;
}
370
371 /**
372 * @brief Get scope attributes in thread attributes object.
373 *
374 * See IEEE 1003.1
375 */
pthread_attr_getscope(const pthread_attr_t * _attr,int * contentionscope)376 int pthread_attr_getscope(const pthread_attr_t *_attr, int *contentionscope)
377 {
378 struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
379
380 if (!__attr_is_initialized(attr) || contentionscope == NULL) {
381 return EINVAL;
382 }
383 *contentionscope = attr->contentionscope;
384 return 0;
385 }
386
387 /**
388 * @brief Set scope attributes in thread attributes object.
389 *
390 * See IEEE 1003.1
391 */
int pthread_attr_setscope(pthread_attr_t *_attr, int contentionscope)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		LOG_DBG("attr %p is not initialized", attr);
		return EINVAL;
	}

	switch (contentionscope) {
	case PTHREAD_SCOPE_SYSTEM:
		attr->contentionscope = contentionscope;
		return 0;
	case PTHREAD_SCOPE_PROCESS:
		/* Zephyr does not yet support processes or process scheduling */
		LOG_DBG("%s contentionscope %d", "Unsupported", contentionscope);
		return ENOTSUP;
	default:
		LOG_DBG("%s contentionscope %d", "Invalid", contentionscope);
		return EINVAL;
	}
}
413
414 /**
415 * @brief Get inherit scheduler attributes in thread attributes object.
416 *
417 * See IEEE 1003.1
418 */
pthread_attr_getinheritsched(const pthread_attr_t * _attr,int * inheritsched)419 int pthread_attr_getinheritsched(const pthread_attr_t *_attr, int *inheritsched)
420 {
421 struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
422
423 if (!__attr_is_initialized(attr) || inheritsched == NULL) {
424 return EINVAL;
425 }
426 *inheritsched = attr->inheritsched;
427 return 0;
428 }
429
430 /**
431 * @brief Set inherit scheduler attributes in thread attributes object.
432 *
433 * See IEEE 1003.1
434 */
int pthread_attr_setinheritsched(pthread_attr_t *_attr, int inheritsched)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		LOG_DBG("attr %p is not initialized", attr);
		return EINVAL;
	}

	switch (inheritsched) {
	case PTHREAD_INHERIT_SCHED:
	case PTHREAD_EXPLICIT_SCHED:
		attr->inheritsched = inheritsched;
		return 0;
	default:
		LOG_DBG("Invalid inheritsched %d", inheritsched);
		return EINVAL;
	}
}
452
/* Deferred-work entry point for reclaiming exited, unjoinable threads;
 * scheduled from posix_thread_finalize() with a configurable delay.
 */
static void posix_thread_recycle_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	posix_thread_recycle();
}
static K_WORK_DELAYABLE_DEFINE(posix_thread_recycle_work, posix_thread_recycle_work_handler);
459
/*
 * Final step of a pthread's life: run thread-specific-data destructors, park
 * the thread on done_q with its return value, schedule the recycler, and
 * abort the underlying k_thread. Does not return when @p t is the caller.
 */
static void posix_thread_finalize(struct posix_thread *t, void *retval)
{
	sys_snode_t *node_l;
	k_spinlock_key_t key;
	pthread_key_obj *key_obj;
	pthread_thread_data *thread_spec_data;

	/* run destructors for any pthread_key values this thread holds */
	SYS_SLIST_FOR_EACH_NODE(&t->key_list, node_l) {
		thread_spec_data = (pthread_thread_data *)node_l;
		if (thread_spec_data != NULL) {
			key_obj = thread_spec_data->key;
			if (key_obj->destructor != NULL) {
				(key_obj->destructor)(thread_spec_data->spec_data);
			}
		}
	}

	/* move thread from run_q to done_q */
	key = k_spin_lock(&pthread_pool_lock);
	sys_dlist_remove(&t->q_node);
	posix_thread_q_set(t, POSIX_THREAD_DONE_Q);
	t->retval = retval;
	k_spin_unlock(&pthread_pool_lock, key);

	/* trigger recycle work */
	(void)k_work_schedule(&posix_thread_recycle_work, K_MSEC(CONFIG_PTHREAD_RECYCLER_DELAY_MS));

	/* abort the underlying k_thread */
	k_thread_abort(&t->thread);
}
490
/* Entry trampoline for every pthread. arg1 = user argument, arg2 = the
 * start routine, arg3 = barrier identifier when
 * CONFIG_PTHREAD_CREATE_BARRIER is enabled.
 */
FUNC_NORETURN
static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3)
{
	int err;
	/* NOTE(review): declared int but passed to pthread_barrier_wait(),
	 * which takes a pthread_barrier_t * — relies on the two types being
	 * layout-compatible; confirm against the pthread_barrier_t typedef.
	 */
	int barrier;
	void *(*fun_ptr)(void *arg) = arg2;
	struct posix_thread *t = CONTAINER_OF(k_current_get(), struct posix_thread, thread);

	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		/* cross the barrier so that pthread_create() can continue */
		barrier = POINTER_TO_UINT(arg3);
		err = pthread_barrier_wait(&barrier);
		__ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
	}

	/* run the user routine; its result becomes the thread's retval */
	posix_thread_finalize(t, fun_ptr(arg1));

	CODE_UNREACHABLE;
}
510
/*
 * Return pool slots of threads that exited and can no longer be joined
 * (detached, or already joined) to ready_q. Called from pthread_create(),
 * pthread_join(), and the delayed recycler work item.
 */
static void posix_thread_recycle(void)
{
	k_spinlock_key_t key;
	struct posix_thread *t;
	struct posix_thread *safe_t;
	sys_dlist_t recyclables = SYS_DLIST_STATIC_INIT(&recyclables);

	/* phase 1: collect recyclable entries off done_q under the lock */
	key = k_spin_lock(&pthread_pool_lock);
	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&posix_thread_q[POSIX_THREAD_DONE_Q], t, safe_t, q_node) {
		if (t->attr.detachstate == PTHREAD_CREATE_JOINABLE) {
			/* thread has not been joined yet */
			continue;
		}

		sys_dlist_remove(&t->q_node);
		sys_dlist_append(&recyclables, &t->q_node);
	}
	k_spin_unlock(&pthread_pool_lock, key);

	if (sys_dlist_is_empty(&recyclables)) {
		return;
	}

	LOG_DBG("Recycling %zu threads", sys_dlist_len(&recyclables));

	/* phase 2: tear down attrs (may free stacks) outside the spinlock */
	SYS_DLIST_FOR_EACH_CONTAINER(&recyclables, t, q_node) {
		if (t->attr.caller_destroys) {
			t->attr = (struct posix_thread_attr){0};
		} else {
			(void)pthread_attr_destroy((pthread_attr_t *)&t->attr);
		}
	}

	/* phase 3: hand the cleaned slots back to ready_q under the lock */
	key = k_spin_lock(&pthread_pool_lock);
	while (!sys_dlist_is_empty(&recyclables)) {
		t = CONTAINER_OF(sys_dlist_get(&recyclables), struct posix_thread, q_node);
		posix_thread_q_set(t, POSIX_THREAD_READY_Q);
	}
	k_spin_unlock(&pthread_pool_lock, key);
}
551
552 /**
553 * @brief Create a new thread.
554 *
555 * Pthread attribute should not be NULL. API will return Error on NULL
556 * attribute value.
557 *
558 * See IEEE 1003.1
559 */
int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadroutine)(void *),
		   void *arg)
{
	int err;
	pthread_barrier_t barrier;
	struct posix_thread *t = NULL;

	/* a caller-supplied attr must be immediately runnable (stack + policy) */
	if (!(_attr == NULL || __attr_is_runnable((struct posix_thread_attr *)_attr))) {
		return EINVAL;
	}

	/* reclaim resources greedily */
	posix_thread_recycle();

	/* claim a pool slot: move one entry from ready_q to run_q */
	K_SPINLOCK(&pthread_pool_lock) {
		if (!sys_dlist_is_empty(&posix_thread_q[POSIX_THREAD_READY_Q])) {
			t = CONTAINER_OF(sys_dlist_get(&posix_thread_q[POSIX_THREAD_READY_Q]),
					 struct posix_thread, q_node);

			/* initialize thread state */
			posix_thread_q_set(t, POSIX_THREAD_RUN_Q);
			sys_slist_init(&t->key_list);
			sys_slist_init(&t->cleanup_list);
		}
	}

	if (t != NULL && IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		err = pthread_barrier_init(&barrier, NULL, 2);
		if (err != 0) {
			/* cannot allocate barrier. move thread back to ready_q */
			K_SPINLOCK(&pthread_pool_lock) {
				sys_dlist_remove(&t->q_node);
				posix_thread_q_set(t, POSIX_THREAD_READY_Q);
			}
			t = NULL;
		}
	}

	if (t == NULL) {
		/* no threads are ready */
		LOG_DBG("No threads are ready");
		return EAGAIN;
	}

	if (_attr == NULL) {
		/* no attr given: build a default one (may auto-allocate a stack),
		 * rolling the slot back to ready_q on failure
		 */
		err = pthread_attr_init((pthread_attr_t *)&t->attr);
		if (err == 0 && !__attr_is_runnable(&t->attr)) {
			(void)pthread_attr_destroy((pthread_attr_t *)&t->attr);
			err = EINVAL;
		}
		if (err != 0) {
			/* cannot allocate pthread attributes (e.g. stack) */
			K_SPINLOCK(&pthread_pool_lock) {
				sys_dlist_remove(&t->q_node);
				posix_thread_q_set(t, POSIX_THREAD_READY_Q);
			}
			return err;
		}
		/* caller not responsible for destroying attr */
		t->attr.caller_destroys = false;
	} else {
		/* copy user-provided attr into thread, caller must destroy attr at a later time */
		t->attr = *(struct posix_thread_attr *)_attr;
	}

	/* inherit the creator's scheduling policy and priority if requested */
	if (t->attr.inheritsched == PTHREAD_INHERIT_SCHED) {
		int pol;

		t->attr.priority =
			zephyr_to_posix_priority(k_thread_priority_get(k_current_get()), &pol);
		t->attr.schedpolicy = pol;
	}

	/* spawn the thread */
	k_thread_create(
		&t->thread, t->attr.stack, __get_attr_stacksize(&t->attr) + t->attr.guardsize,
		zephyr_thread_wrapper, (void *)arg, threadroutine,
		IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER) ? UINT_TO_POINTER(barrier) : NULL,
		posix_to_zephyr_priority(t->attr.priority, t->attr.schedpolicy), 0, K_NO_WAIT);

	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		/* wait for the spawned thread to cross our barrier */
		err = pthread_barrier_wait(&barrier);
		__ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
		err = pthread_barrier_destroy(&barrier);
		__ASSERT_NO_MSG(err == 0);
	}

	/* finally provide the initialized thread to the caller */
	*th = mark_pthread_obj_initialized(posix_thread_to_offset(t));

	LOG_DBG("Created pthread %p", &t->thread);

	return 0;
}
655
pthread_getconcurrency(void)656 int pthread_getconcurrency(void)
657 {
658 int ret = 0;
659
660 K_SPINLOCK(&pthread_pool_lock) {
661 ret = pthread_concurrency;
662 }
663
664 return ret;
665 }
666
pthread_setconcurrency(int new_level)667 int pthread_setconcurrency(int new_level)
668 {
669 if (new_level < 0) {
670 return EINVAL;
671 }
672
673 if (new_level > CONFIG_MP_MAX_NUM_CPUS) {
674 return EAGAIN;
675 }
676
677 K_SPINLOCK(&pthread_pool_lock) {
678 pthread_concurrency = new_level;
679 }
680
681 return 0;
682 }
683
684 /**
685 * @brief Set cancelability State.
686 *
687 * See IEEE 1003.1
688 */
pthread_setcancelstate(int state,int * oldstate)689 int pthread_setcancelstate(int state, int *oldstate)
690 {
691 int ret = 0;
692 struct posix_thread *t;
693 bool cancel_pending = false;
694 bool cancel_type = PTHREAD_CANCEL_ENABLE;
695
696 if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE) {
697 LOG_DBG("Invalid pthread state %d", state);
698 return EINVAL;
699 }
700
701 K_SPINLOCK(&pthread_pool_lock) {
702 t = to_posix_thread(pthread_self());
703 if (t == NULL) {
704 ret = EINVAL;
705 K_SPINLOCK_BREAK;
706 }
707
708 if (oldstate != NULL) {
709 *oldstate = t->attr.cancelstate;
710 }
711
712 t->attr.cancelstate = state;
713 cancel_pending = t->attr.cancelpending;
714 cancel_type = t->attr.canceltype;
715 }
716
717 if (state == PTHREAD_CANCEL_ENABLE && cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS &&
718 cancel_pending) {
719 posix_thread_finalize(t, PTHREAD_CANCELED);
720 }
721
722 return 0;
723 }
724
725 /**
726 * @brief Set cancelability Type.
727 *
728 * See IEEE 1003.1
729 */
pthread_setcanceltype(int type,int * oldtype)730 int pthread_setcanceltype(int type, int *oldtype)
731 {
732 int ret = 0;
733 struct posix_thread *t;
734
735 if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS) {
736 LOG_DBG("Invalid pthread cancel type %d", type);
737 return EINVAL;
738 }
739
740 K_SPINLOCK(&pthread_pool_lock) {
741 t = to_posix_thread(pthread_self());
742 if (t == NULL) {
743 ret = EINVAL;
744 K_SPINLOCK_BREAK;
745 }
746
747 if (oldtype != NULL) {
748 *oldtype = t->attr.canceltype;
749 }
750 t->attr.canceltype = type;
751 }
752
753 return ret;
754 }
755
756 /**
757 * @brief Create a cancellation point in the calling thread.
758 *
759 * See IEEE 1003.1
760 */
void pthread_testcancel(void)
{
	struct posix_thread *t;
	bool cancel_pended = false;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			K_SPINLOCK_BREAK;
		}
		if (t->attr.cancelstate != PTHREAD_CANCEL_ENABLE) {
			K_SPINLOCK_BREAK;
		}
		if (t->attr.cancelpending) {
			cancel_pended = true;
			/* block further cancellation while tearing down */
			t->attr.cancelstate = PTHREAD_CANCEL_DISABLE;
		}
	}

	/* finalize outside the spinlock; does not return when cancelled */
	if (cancel_pended) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}
}
784
785 /**
786 * @brief Cancel execution of a thread.
787 *
788 * See IEEE 1003.1
789 */
int pthread_cancel(pthread_t pthread)
{
	int ret = 0;
	/* NOTE(review): these hold cancelstate/canceltype values; bool only
	 * works because both constants are 0/1 — int would be clearer.
	 */
	bool cancel_state = PTHREAD_CANCEL_ENABLE;
	bool cancel_type = PTHREAD_CANCEL_DEFERRED;
	struct posix_thread *t = NULL;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		if (!__attr_is_initialized(&t->attr)) {
			/* thread has already terminated */
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		/* record the request; deferred threads act on it at their
		 * next cancellation point (e.g. pthread_testcancel())
		 */
		t->attr.cancelpending = true;
		cancel_state = t->attr.cancelstate;
		cancel_type = t->attr.canceltype;
	}

	/* asynchronous + enabled: cancel the target immediately */
	if (ret == 0 && cancel_state == PTHREAD_CANCEL_ENABLE &&
	    cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}

	return ret;
}
822
823 /**
824 * @brief Set thread scheduling policy and parameters.
825 *
826 * See IEEE 1003.1
827 */
int pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param)
{
	int ret = 0;
	int new_prio = K_LOWEST_APPLICATION_THREAD_PRIO;
	struct posix_thread *t = NULL;

	/* validate inputs before touching any shared state */
	if (param == NULL || !valid_posix_policy(policy) ||
	    !is_posix_policy_prio_valid(param->sched_priority, policy)) {
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		new_prio = posix_to_zephyr_priority(param->sched_priority, policy);
	}

	/* NOTE(review): the new policy/priority are not stored back into
	 * t->attr; pthread_getschedparam() re-derives them from the kernel
	 * priority, so this appears intentional — confirm.
	 */
	if (ret == 0) {
		k_thread_priority_set(&t->thread, new_prio);
	}

	return ret;
}
855
856 /**
857 * @brief Set thread scheduling priority.
858 *
859 * See IEEE 1003.1
860 */
int pthread_setschedprio(pthread_t thread, int prio)
{
	int ret;
	int policy = -1;
	int new_prio = K_LOWEST_APPLICATION_THREAD_PRIO;
	struct posix_thread *t = NULL;
	struct sched_param param;

	/* fetch the current policy; this also validates the thread handle */
	ret = pthread_getschedparam(thread, &policy, &param);
	if (ret != 0) {
		return ret;
	}

	if (!is_posix_policy_prio_valid(prio, policy)) {
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(thread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		new_prio = posix_to_zephyr_priority(prio, policy);
	}

	if (ret == 0) {
		k_thread_priority_set(&t->thread, new_prio);
	}

	return ret;
}
895
896 /**
897 * @brief Initialise threads attribute object
898 *
899 * See IEEE 1003.1
900 */
int pthread_attr_init(pthread_attr_t *_attr)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	if (attr == NULL) {
		LOG_DBG("Invalid attr pointer");
		return ENOMEM;
	}

	/* the default dynamic stack must fit in the stacksize bit-field */
	BUILD_ASSERT(DYNAMIC_STACK_SIZE <= PTHREAD_STACK_MAX);

	/* start from all-zero defaults, then fill in the non-zero ones */
	*attr = (struct posix_thread_attr){0};
	attr->guardsize = CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT;
	attr->contentionscope = PTHREAD_SCOPE_SYSTEM;
	attr->inheritsched = PTHREAD_INHERIT_SCHED;

	if (DYNAMIC_STACK_SIZE > 0) {
		/* best-effort auto-allocation; failure is not fatal here since
		 * the caller may still attach a stack via pthread_attr_setstack()
		 */
		attr->stack = k_thread_stack_alloc(DYNAMIC_STACK_SIZE + attr->guardsize,
						   k_is_user_context() ? K_USER : 0);
		if (attr->stack == NULL) {
			LOG_DBG("Did not auto-allocate thread stack");
		} else {
			__set_attr_stacksize(attr, DYNAMIC_STACK_SIZE);
			__ASSERT_NO_MSG(__attr_is_initialized(attr));
			LOG_DBG("Allocated thread stack %zu@%p", __get_attr_stacksize(attr),
				attr->stack);
		}
	}

	/* caller responsible for destroying attr */
	attr->initialized = true;

	LOG_DBG("Initialized attr %p", _attr);

	return 0;
}
937
938 /**
939 * @brief Get thread scheduling policy and parameters
940 *
941 * See IEEE 1003.1
942 */
int pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
{
	int ret = 0;
	struct posix_thread *t;

	if (policy == NULL || param == NULL) {
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		/* reject handles that are invalid or already torn down */
		t = to_posix_thread(pthread);
		if (t == NULL || !__attr_is_initialized(&t->attr)) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		/* derive both outputs from the live kernel priority */
		param->sched_priority =
			zephyr_to_posix_priority(k_thread_priority_get(&t->thread), policy);
	}

	return ret;
}
970
971 /**
972 * @brief Dynamic package initialization
973 *
974 * See IEEE 1003.1
975 */
int pthread_once(pthread_once_t *once, void (*init_func)(void))
{
	bool first_caller = false;
	struct pthread_once *const _once = (struct pthread_once *)once;

	if (init_func == NULL) {
		return EINVAL;
	}

	/* atomically claim the right to run the initializer exactly once */
	K_SPINLOCK(&pthread_pool_lock) {
		first_caller = !_once->flag;
		_once->flag = true;
	}

	if (first_caller) {
		init_func();
	}

	return 0;
}
999
1000 /**
1001 * @brief Terminate calling thread.
1002 *
1003 * See IEEE 1003.1
1004 */
FUNC_NORETURN
void pthread_exit(void *retval)
{
	struct posix_thread *self = NULL;

	K_SPINLOCK(&pthread_pool_lock) {
		self = to_posix_thread(pthread_self());
		if (self == NULL) {
			K_SPINLOCK_BREAK;
		}

		/* Mark a thread as cancellable before exiting */
		self->attr.cancelstate = PTHREAD_CANCEL_ENABLE;
	}

	if (self == NULL) {
		/* not a valid posix_thread */
		LOG_DBG("Aborting non-pthread %p", k_current_get());
		k_thread_abort(k_current_get());

		CODE_UNREACHABLE;
	}

	/* runs key destructors and aborts the kernel thread; never returns */
	posix_thread_finalize(self, retval);
	CODE_UNREACHABLE;
}
1031
1032 /**
1033 * @brief Wait for a thread termination.
1034 *
1035 * See IEEE 1003.1
1036 */
int pthread_join(pthread_t pthread, void **status)
{
	int ret = 0;
	struct posix_thread *t = NULL;

	/* joining oneself would deadlock forever */
	if (pthread == pthread_self()) {
		LOG_DBG("Pthread attempted to join itself (%x)", pthread);
		return EDEADLK;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		LOG_DBG("Pthread %p joining..", &t->thread);

		if (t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
			/* undefined behaviour */
			ret = EINVAL;
			K_SPINLOCK_BREAK;
		}

		if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		/*
		 * thread is joinable and is in run_q or done_q.
		 * let's ensure that the thread cannot be joined again after this point.
		 */
		t->attr.detachstate = PTHREAD_CREATE_DETACHED;
	}

	switch (ret) {
	case ESRCH:
		LOG_DBG("Pthread %p has already been joined", &t->thread);
		return ret;
	case EINVAL:
		LOG_DBG("Pthread %p is not a joinable", &t->thread);
		return ret;
	case 0:
		break;
	}

	/* block (with the lock released) until the target terminates */
	ret = k_thread_join(&t->thread, K_FOREVER);
	/* other possibilities? */
	__ASSERT_NO_MSG(ret == 0);

	LOG_DBG("Joined pthread %p", &t->thread);

	if (status != NULL) {
		LOG_DBG("Writing status to %p", status);
		*status = t->retval;
	}

	/* eagerly hand the slot back to the pool */
	posix_thread_recycle();

	return 0;
}
1100
1101 /**
1102 * @brief Detach a thread.
1103 *
1104 * See IEEE 1003.1
1105 */
int pthread_detach(pthread_t pthread)
{
	int ret = 0;
	struct posix_thread *t;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		/* a thread still on the ready queue, or one already detached
		 * (or joined), cannot be detached
		 */
		if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q ||
		    t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
			LOG_DBG("Pthread %p cannot be detached", &t->thread);
			ret = EINVAL;
			K_SPINLOCK_BREAK;
		}

		t->attr.detachstate = PTHREAD_CREATE_DETACHED;
	}

	if (ret == 0) {
		LOG_DBG("Pthread %p detached", &t->thread);
	}

	return ret;
}
1134
1135 /**
1136 * @brief Get detach state attribute in thread attributes object.
1137 *
1138 * See IEEE 1003.1
1139 */
pthread_attr_getdetachstate(const pthread_attr_t * _attr,int * detachstate)1140 int pthread_attr_getdetachstate(const pthread_attr_t *_attr, int *detachstate)
1141 {
1142 const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1143
1144 if (!__attr_is_initialized(attr) || (detachstate == NULL)) {
1145 return EINVAL;
1146 }
1147
1148 *detachstate = attr->detachstate;
1149 return 0;
1150 }
1151
1152 /**
1153 * @brief Set detach state attribute in thread attributes object.
1154 *
1155 * See IEEE 1003.1
1156 */
pthread_attr_setdetachstate(pthread_attr_t * _attr,int detachstate)1157 int pthread_attr_setdetachstate(pthread_attr_t *_attr, int detachstate)
1158 {
1159 struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1160
1161 if (!__attr_is_initialized(attr) || ((detachstate != PTHREAD_CREATE_DETACHED) &&
1162 (detachstate != PTHREAD_CREATE_JOINABLE))) {
1163 return EINVAL;
1164 }
1165
1166 attr->detachstate = detachstate;
1167 return 0;
1168 }
1169
1170 /**
1171 * @brief Get scheduling policy attribute in Thread attributes.
1172 *
1173 * See IEEE 1003.1
1174 */
pthread_attr_getschedpolicy(const pthread_attr_t * _attr,int * policy)1175 int pthread_attr_getschedpolicy(const pthread_attr_t *_attr, int *policy)
1176 {
1177 const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1178
1179 if (!__attr_is_initialized(attr) || (policy == NULL)) {
1180 return EINVAL;
1181 }
1182
1183 *policy = attr->schedpolicy;
1184 return 0;
1185 }
1186
1187 /**
1188 * @brief Set scheduling policy attribute in Thread attributes object.
1189 *
1190 * See IEEE 1003.1
1191 */
pthread_attr_setschedpolicy(pthread_attr_t * _attr,int policy)1192 int pthread_attr_setschedpolicy(pthread_attr_t *_attr, int policy)
1193 {
1194 struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1195
1196 if (!__attr_is_initialized(attr) || !valid_posix_policy(policy)) {
1197 return EINVAL;
1198 }
1199
1200 attr->schedpolicy = policy;
1201 return 0;
1202 }
1203
1204 /**
1205 * @brief Get stack size attribute in thread attributes object.
1206 *
1207 * See IEEE 1003.1
1208 */
pthread_attr_getstacksize(const pthread_attr_t * _attr,size_t * stacksize)1209 int pthread_attr_getstacksize(const pthread_attr_t *_attr, size_t *stacksize)
1210 {
1211 const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1212
1213 if (!__attr_is_initialized(attr) || (stacksize == NULL)) {
1214 return EINVAL;
1215 }
1216
1217 *stacksize = __get_attr_stacksize(attr);
1218 return 0;
1219 }
1220
/**
 * @brief Set stack size attribute in thread attributes object.
 *
 * Tries to allocate a replacement stack of @p stacksize (plus the attr's
 * guard area). On allocation failure a shrink request still succeeds by
 * merely recording the smaller size; a grow request fails with ENOMEM.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setstacksize(pthread_attr_t *_attr, size_t stacksize)
{
	int ret;
	void *new_stack;
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	/* note: the stacksize == 0 test is redundant with the PTHREAD_STACK_MIN
	 * comparison whenever PTHREAD_STACK_MIN > 0; kept as an explicit guard
	 */
	if (!__attr_is_initialized(attr) || stacksize == 0 || stacksize < PTHREAD_STACK_MIN ||
	    stacksize > PTHREAD_STACK_MAX) {
		return EINVAL;
	}

	/* no-op if the requested size is already in effect */
	if (__get_attr_stacksize(attr) == stacksize) {
		return 0;
	}

	/* K_USER presumably makes the stack usable from user mode when the
	 * caller is in a user context - TODO confirm against k_thread_stack_alloc
	 */
	new_stack =
		k_thread_stack_alloc(stacksize + attr->guardsize, k_is_user_context() ? K_USER : 0);
	if (new_stack == NULL) {
		if (stacksize < __get_attr_stacksize(attr)) {
			/* shrink request: keep using the existing (larger) stack,
			 * just record the smaller size
			 */
			__set_attr_stacksize(attr, stacksize);
			return 0;
		}

		LOG_DBG("k_thread_stack_alloc(%zu) failed",
			__get_attr_stacksize(attr) + attr->guardsize);
		return ENOMEM;
	}
	LOG_DBG("Allocated thread stack %zu@%p", stacksize + attr->guardsize, new_stack);

	if (attr->stack != NULL) {
		/* best-effort free of the previous stack; failure is only logged-by-omission */
		ret = k_thread_stack_free(attr->stack);
		if (ret == 0) {
			LOG_DBG("Freed attr %p thread stack %zu@%p", _attr,
				__get_attr_stacksize(attr), attr->stack);
		}
	}

	__set_attr_stacksize(attr, stacksize);
	attr->stack = new_stack;

	return 0;
}
1268
1269 /**
1270 * @brief Get stack attributes in thread attributes object.
1271 *
1272 * See IEEE 1003.1
1273 */
pthread_attr_getstack(const pthread_attr_t * _attr,void ** stackaddr,size_t * stacksize)1274 int pthread_attr_getstack(const pthread_attr_t *_attr, void **stackaddr, size_t *stacksize)
1275 {
1276 const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;
1277
1278 if (!__attr_is_initialized(attr) || (stackaddr == NULL) || (stacksize == NULL)) {
1279 return EINVAL;
1280 }
1281
1282 *stackaddr = attr->stack;
1283 *stacksize = __get_attr_stacksize(attr);
1284 return 0;
1285 }
1286
int pthread_attr_getguardsize(const pthread_attr_t *ZRESTRICT _attr, size_t *ZRESTRICT guardsize)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;
	int err = EINVAL;

	if (__attr_is_initialized(attr) && (guardsize != NULL)) {
		*guardsize = attr->guardsize;
		err = 0;
	}

	return err;
}
1299
int pthread_attr_setguardsize(pthread_attr_t *_attr, size_t guardsize)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	/* guardsize is stored in a bitfield, so it must fit within PTHREAD_GUARD_MAX */
	if (__attr_is_initialized(attr) && (guardsize <= PTHREAD_GUARD_MAX)) {
		attr->guardsize = guardsize;
		return 0;
	}

	return EINVAL;
}
1312
1313 /**
1314 * @brief Get thread attributes object scheduling parameters.
1315 *
1316 * See IEEE 1003.1
1317 */
pthread_attr_getschedparam(const pthread_attr_t * _attr,struct sched_param * schedparam)1318 int pthread_attr_getschedparam(const pthread_attr_t *_attr, struct sched_param *schedparam)
1319 {
1320 struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1321
1322 if (!__attr_is_initialized(attr) || (schedparam == NULL)) {
1323 return EINVAL;
1324 }
1325
1326 schedparam->sched_priority = attr->priority;
1327 return 0;
1328 }
1329
1330 /**
1331 * @brief Destroy thread attributes object.
1332 *
1333 * See IEEE 1003.1
1334 */
pthread_attr_destroy(pthread_attr_t * _attr)1335 int pthread_attr_destroy(pthread_attr_t *_attr)
1336 {
1337 int ret;
1338 struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;
1339
1340 if (!__attr_is_initialized(attr)) {
1341 return EINVAL;
1342 }
1343
1344 ret = k_thread_stack_free(attr->stack);
1345 if (ret == 0) {
1346 LOG_DBG("Freed attr %p thread stack %zu@%p", _attr, __get_attr_stacksize(attr),
1347 attr->stack);
1348 }
1349
1350 *attr = (struct posix_thread_attr){0};
1351 LOG_DBG("Destroyed attr %p", _attr);
1352
1353 return 0;
1354 }
1355
int pthread_setname_np(pthread_t thread, const char *name)
{
#ifdef CONFIG_THREAD_NAME
	pthread_t idx = get_posix_thread_idx(thread);

	if (idx >= ARRAY_SIZE(posix_thread_pool)) {
		return ESRCH;
	}

	if (name == NULL) {
		return EINVAL;
	}

	return k_thread_name_set(&posix_thread_pool[idx].thread, name);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(name);
	return 0;
#endif
}
1379
/**
 * @brief Copy the name of a thread into a caller-supplied buffer.
 *
 * @param thread handle of the thread whose name is requested
 * @param name destination buffer; zero-filled, then NUL-terminated
 * @param len size of @p name in bytes; must be at least 1
 *
 * @return 0 on success, ESRCH for an invalid handle, EINVAL for a NULL
 *         buffer, ERANGE when @p len is 0 (no room for the terminator)
 */
int pthread_getname_np(pthread_t thread, char *name, size_t len)
{
#ifdef CONFIG_THREAD_NAME
	k_tid_t kthread;

	thread = get_posix_thread_idx(thread);
	if (thread >= ARRAY_SIZE(posix_thread_pool)) {
		return ESRCH;
	}

	if (name == NULL) {
		return EINVAL;
	}

	/* with len == 0 there is no room even for the NUL terminator, and
	 * (len - 1) below would wrap around to SIZE_MAX
	 */
	if (len == 0) {
		return ERANGE;
	}

	memset(name, '\0', len);
	kthread = &posix_thread_pool[thread].thread;
	return k_thread_name_copy(kthread, name, len - 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(name);
	ARG_UNUSED(len);
	return 0;
#endif
}
1404
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
	/* fork() is not supported, so fork handlers cannot be registered */
	ARG_UNUSED(prepare);
	ARG_UNUSED(parent);
	ARG_UNUSED(child);
	return ENOSYS;
}
1413
/* this should probably go into signal.c but we need access to the lock */
int pthread_sigmask(int how, const sigset_t *ZRESTRICT set, sigset_t *ZRESTRICT oset)
{
	int ret = 0;
	struct posix_thread *t;

	/* only the three standard manipulation modes are accepted */
	if (!(how == SIG_BLOCK || how == SIG_SETMASK || how == SIG_UNBLOCK)) {
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		/* report the previous mask whenever a destination is supplied */
		if (oset != NULL) {
			*oset = t->sigset;
		}

		/* a NULL set means "query only" - nothing to modify */
		if (set == NULL) {
			K_SPINLOCK_BREAK;
		}

		switch (how) {
		case SIG_BLOCK:
			/* union: add the given signals to the blocked set */
			for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
				t->sigset.sig[i] |= set->sig[i];
			}
			break;
		case SIG_SETMASK:
			/* replace the blocked set wholesale */
			t->sigset = *set;
			break;
		case SIG_UNBLOCK:
			/* difference: remove the given signals from the blocked set */
			for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
				t->sigset.sig[i] &= ~set->sig[i];
			}
			break;
		}
	}

	return ret;
}
1458
posix_thread_pool_init(void)1459 static int posix_thread_pool_init(void)
1460 {
1461 ARRAY_FOR_EACH_PTR(posix_thread_pool, th) {
1462 posix_thread_q_set(th, POSIX_THREAD_READY_Q);
1463 }
1464
1465 return 0;
1466 }
1467 SYS_INIT(posix_thread_pool_init, PRE_KERNEL_1, 0);
1468