/*
 * Copyright (c) 2018 Intel Corporation
 * Copyright (c) 2023 Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "posix_internal.h"
#include "pthread_sched.h"

#include <stdio.h>

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/posix/pthread.h>
#include <zephyr/posix/unistd.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

#define ZEPHYR_TO_POSIX_PRIORITY(_zprio) \
	(((_zprio) < 0) ? (-1 * ((_zprio) + 1)) : (CONFIG_NUM_PREEMPT_PRIORITIES - (_zprio)-1))

#define POSIX_TO_ZEPHYR_PRIORITY(_prio, _pol) \
	(((_pol) == SCHED_FIFO) ? (-1 * ((_prio) + 1)) \
				: (CONFIG_NUM_PREEMPT_PRIORITIES - (_prio)-1))
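
/*
 * Worked example (illustrative; assumes the default CONFIG_NUM_COOP_PRIORITIES=16
 * and CONFIG_NUM_PREEMPT_PRIORITIES=15): Zephyr cooperative priority -1 maps to
 * SCHED_FIFO priority 0 (-1 * (-1 + 1) == 0), and Zephyr preemptible priority 0
 * maps to SCHED_RR priority 14 (15 - 0 - 1). The mapping is its own inverse,
 * so POSIX_TO_ZEPHYR_PRIORITY(14, SCHED_RR) == 0 again.
 */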

#define DEFAULT_PTHREAD_PRIORITY \
	POSIX_TO_ZEPHYR_PRIORITY(K_LOWEST_APPLICATION_THREAD_PRIO, DEFAULT_PTHREAD_POLICY)
#define DEFAULT_PTHREAD_POLICY (IS_ENABLED(CONFIG_PREEMPT_ENABLED) ? SCHED_RR : SCHED_FIFO)

#define PTHREAD_STACK_MAX BIT(CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS)
#define PTHREAD_GUARD_MAX BIT_MASK(CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_BITS)

LOG_MODULE_REGISTER(pthread, CONFIG_PTHREAD_LOG_LEVEL);

#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#define DYNAMIC_STACK_SIZE CONFIG_DYNAMIC_THREAD_STACK_SIZE
#else
#define DYNAMIC_STACK_SIZE 0
#endif

static inline size_t __get_attr_stacksize(const struct posix_thread_attr *attr)
{
	return attr->stacksize + 1;
}

static inline void __set_attr_stacksize(struct posix_thread_attr *attr, size_t stacksize)
{
	attr->stacksize = stacksize - 1;
}
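
/*
 * Note: attr->stacksize is a bitfield of CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS
 * bits, so the size is stored biased by one. Storing (stacksize - 1) lets an
 * N-bit field represent sizes in [1, 2^N] rather than [0, 2^N - 1]; e.g. with
 * a 16-bit field, a 65536-byte stack is stored as 65535 and read back as 65536,
 * matching PTHREAD_STACK_MAX == BIT(16).
 */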

struct __pthread_cleanup {
	void (*routine)(void *arg);
	void *arg;
	sys_snode_t node;
};

enum posix_thread_qid {
	/* ready to be started via pthread_create() */
	POSIX_THREAD_READY_Q,
	/* running */
	POSIX_THREAD_RUN_Q,
	/* exited (either joinable or detached) */
	POSIX_THREAD_DONE_Q,
	/* invalid */
	POSIX_THREAD_INVALID_Q,
};
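
/*
 * Thread lifecycle (sketch): each pool entry starts in ready_q, moves to run_q
 * in pthread_create(), moves to done_q in posix_thread_finalize(), and is
 * returned to ready_q by posix_thread_recycle() once it has been detached or
 * joined. POSIX_THREAD_INVALID_Q is never stored in a thread; it is only a
 * sentinel returned by posix_thread_q_get() when the qid is corrupted.
 */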

/* only 2 bits in struct posix_thread_attr for schedpolicy */
BUILD_ASSERT(SCHED_OTHER < BIT(2) && SCHED_FIFO < BIT(2) && SCHED_RR < BIT(2));

BUILD_ASSERT((PTHREAD_CREATE_DETACHED == 0 || PTHREAD_CREATE_JOINABLE == 0) &&
	     (PTHREAD_CREATE_DETACHED == 1 || PTHREAD_CREATE_JOINABLE == 1));

BUILD_ASSERT((PTHREAD_CANCEL_ENABLE == 0 || PTHREAD_CANCEL_DISABLE == 0) &&
	     (PTHREAD_CANCEL_ENABLE == 1 || PTHREAD_CANCEL_DISABLE == 1));

BUILD_ASSERT(CONFIG_POSIX_PTHREAD_ATTR_STACKSIZE_BITS + CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_BITS <=
	     32);

static void posix_thread_recycle(void);
static sys_dlist_t posix_thread_q[] = {
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_READY_Q]),
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_RUN_Q]),
	SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_DONE_Q]),
};
static struct posix_thread posix_thread_pool[CONFIG_MAX_PTHREAD_COUNT];
static struct k_spinlock pthread_pool_lock;
static int pthread_concurrency;
static inline void posix_thread_q_set(struct posix_thread *t, enum posix_thread_qid qid)
{
	switch (qid) {
	case POSIX_THREAD_READY_Q:
	case POSIX_THREAD_RUN_Q:
	case POSIX_THREAD_DONE_Q:
		sys_dlist_append(&posix_thread_q[qid], &t->q_node);
		t->qid = qid;
		break;
	default:
		__ASSERT(false, "cannot set invalid qid %d for posix thread %p", qid, t);
		break;
	}
}

static inline enum posix_thread_qid posix_thread_q_get(struct posix_thread *t)
{
	switch (t->qid) {
	case POSIX_THREAD_READY_Q:
	case POSIX_THREAD_RUN_Q:
	case POSIX_THREAD_DONE_Q:
		return t->qid;
	default:
		__ASSERT(false, "posix thread %p has invalid qid: %d", t, t->qid);
		return POSIX_THREAD_INVALID_Q;
	}
}

/*
 * We reserve the MSB to mark a pthread_t as initialized (from the
 * perspective of the application). With a linear space, this means that
 * the theoretical pthread_t range is [0,2147483647].
 */
BUILD_ASSERT(CONFIG_MAX_PTHREAD_COUNT < PTHREAD_OBJ_MASK_INIT,
	     "CONFIG_MAX_PTHREAD_COUNT is too high");

static inline size_t posix_thread_to_offset(struct posix_thread *t)
{
	return t - posix_thread_pool;
}

static inline size_t get_posix_thread_idx(pthread_t pth)
{
	return mark_pthread_obj_uninitialized(pth);
}

struct posix_thread *to_posix_thread(pthread_t pthread)
{
	struct posix_thread *t;
	bool actually_initialized;
	size_t bit = get_posix_thread_idx(pthread);

	/* if the provided thread does not claim to be initialized, it's invalid */
	if (!is_pthread_obj_initialized(pthread)) {
		LOG_ERR("pthread is not initialized (%x)", pthread);
		return NULL;
	}

	if (bit >= CONFIG_MAX_PTHREAD_COUNT) {
		LOG_ERR("Invalid pthread (%x)", pthread);
		return NULL;
	}

	t = &posix_thread_pool[bit];

	/*
	 * Denote a pthread as "initialized" (i.e. allocated) if it is not in ready_q.
	 * This differs from other posix object allocation strategies because they use
	 * a bitarray to indicate whether an object has been allocated.
	 */
	actually_initialized = !(posix_thread_q_get(t) == POSIX_THREAD_READY_Q ||
				 (posix_thread_q_get(t) == POSIX_THREAD_DONE_Q &&
				  t->attr.detachstate == PTHREAD_CREATE_DETACHED));

	if (!actually_initialized) {
		LOG_ERR("Pthread claims to be initialized (%x)", pthread);
		return NULL;
	}

	return &posix_thread_pool[bit];
}

pthread_t pthread_self(void)
{
	size_t bit;
	struct posix_thread *t;

	t = (struct posix_thread *)CONTAINER_OF(k_current_get(), struct posix_thread, thread);
	bit = posix_thread_to_offset(t);

	return mark_pthread_obj_initialized(bit);
}

int pthread_equal(pthread_t pt1, pthread_t pt2)
{
	return (pt1 == pt2);
}

pid_t getpid(void)
{
	/*
	 * To maintain compatibility with some other POSIX operating systems,
	 * a PID of zero is used to indicate that the process exists in another namespace.
	 * PID zero is also used by the scheduler in some cases.
	 * PID one is usually reserved for the init process.
	 * Also note that negative PIDs may be used by kill()
	 * to send signals to process groups in some implementations.
	 *
	 * At the moment, getpid just returns an arbitrary number >= 2
	 */

	return 42;
}

static inline void __z_pthread_cleanup_init(struct __pthread_cleanup *c, void (*routine)(void *arg),
					    void *arg)
{
	*c = (struct __pthread_cleanup){
		.routine = routine,
		.arg = arg,
		.node = {0},
	};
}

void __z_pthread_cleanup_push(void *cleanup[3], void (*routine)(void *arg), void *arg)
{
	struct posix_thread *t = NULL;
	struct __pthread_cleanup *const c = (struct __pthread_cleanup *)cleanup;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		BUILD_ASSERT(3 * sizeof(void *) == sizeof(*c));
		__ASSERT_NO_MSG(t != NULL);
		__ASSERT_NO_MSG(c != NULL);
		__ASSERT_NO_MSG(routine != NULL);
		__z_pthread_cleanup_init(c, routine, arg);
		sys_slist_prepend(&t->cleanup_list, &c->node);
	}
}

void __z_pthread_cleanup_pop(int execute)
{
	sys_snode_t *node;
	struct __pthread_cleanup *c = NULL;
	struct posix_thread *t = NULL;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		__ASSERT_NO_MSG(t != NULL);
		node = sys_slist_get(&t->cleanup_list);
		__ASSERT_NO_MSG(node != NULL);
		c = CONTAINER_OF(node, struct __pthread_cleanup, node);
		__ASSERT_NO_MSG(c != NULL);
		__ASSERT_NO_MSG(c->routine != NULL);
	}
	if (execute) {
		c->routine(c->arg);
	}
}
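
/*
 * Usage sketch (illustrative only; unlock_mutex and lock are hypothetical):
 * these helpers back the standard pthread_cleanup_push()/pop() macros, which
 * reserve the 3-pointer scratch area on the caller's stack:
 *
 *	pthread_mutex_lock(&lock);
 *	pthread_cleanup_push(unlock_mutex, &lock);
 *	... code that may call pthread_exit() ...
 *	pthread_cleanup_pop(1); // pops and runs unlock_mutex(&lock)
 */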

static bool is_posix_policy_prio_valid(int priority, int policy)
{
	if (priority >= sched_get_priority_min(policy) &&
	    priority <= sched_get_priority_max(policy)) {
		return true;
	}

	LOG_ERR("Invalid priority %d and/or policy %d", priority, policy);

	return false;
}

/* Non-static so that they can be tested in ztest */
int zephyr_to_posix_priority(int z_prio, int *policy)
{
	int priority;

	if (z_prio < 0) {
		__ASSERT_NO_MSG(-z_prio <= CONFIG_NUM_COOP_PRIORITIES);
	} else {
		__ASSERT_NO_MSG(z_prio < CONFIG_NUM_PREEMPT_PRIORITIES);
	}

	*policy = (z_prio < 0) ? SCHED_FIFO : SCHED_RR;
	priority = ZEPHYR_TO_POSIX_PRIORITY(z_prio);
	__ASSERT_NO_MSG(is_posix_policy_prio_valid(priority, *policy));

	return priority;
}

/* Non-static so that they can be tested in ztest */
int posix_to_zephyr_priority(int priority, int policy)
{
	__ASSERT_NO_MSG(is_posix_policy_prio_valid(priority, policy));

	return POSIX_TO_ZEPHYR_PRIORITY(priority, policy);
}

static bool __attr_is_runnable(const struct posix_thread_attr *attr)
{
	size_t stacksize;

	if (attr == NULL || attr->stack == NULL) {
		LOG_DBG("attr %p is not initialized", attr);
		return false;
	}

	stacksize = __get_attr_stacksize(attr);
	if (stacksize < PTHREAD_STACK_MIN) {
		LOG_DBG("attr %p has stacksize %zu smaller than PTHREAD_STACK_MIN (%zu)", attr,
			stacksize, (size_t)PTHREAD_STACK_MIN);
		return false;
	}

	/* require a valid scheduler policy */
	if (!valid_posix_policy(attr->schedpolicy)) {
		LOG_ERR("Invalid scheduler policy %d", attr->schedpolicy);
		return false;
	}

	return true;
}

static bool __attr_is_initialized(const struct posix_thread_attr *attr)
{
	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD)) {
		return __attr_is_runnable(attr);
	}

	if (attr == NULL || !attr->initialized) {
		LOG_DBG("attr %p is not initialized", attr);
		return false;
	}

	return true;
}

/**
 * @brief Set scheduling parameter attributes in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setschedparam(pthread_attr_t *_attr, const struct sched_param *schedparam)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || schedparam == NULL ||
	    !is_posix_policy_prio_valid(schedparam->sched_priority, attr->schedpolicy)) {
		LOG_ERR("Invalid pthread_attr_t or sched_param");
		return EINVAL;
	}

	attr->priority = schedparam->sched_priority;
	return 0;
}

/**
 * @brief Set stack attributes in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setstack(pthread_attr_t *_attr, void *stackaddr, size_t stacksize)
{
	int ret;
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (stackaddr == NULL) {
		LOG_ERR("NULL stack address");
		return EACCES;
	}

	if (!__attr_is_initialized(attr) || stacksize == 0 || stacksize < PTHREAD_STACK_MIN ||
	    stacksize > PTHREAD_STACK_MAX) {
		LOG_ERR("Invalid stacksize %zu", stacksize);
		return EINVAL;
	}

	if (attr->stack != NULL) {
		ret = k_thread_stack_free(attr->stack);
		if (ret == 0) {
			LOG_DBG("Freed attr %p thread stack %zu@%p", _attr,
				__get_attr_stacksize(attr), attr->stack);
		}
	}

	attr->stack = stackaddr;
	__set_attr_stacksize(attr, stacksize);

	LOG_DBG("Assigned thread stack %zu@%p to attr %p", __get_attr_stacksize(attr), attr->stack,
		_attr);

	return 0;
}
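
/*
 * Usage sketch (illustrative; my_stack is hypothetical): callers that manage
 * their own stack memory can pair this with a statically defined stack:
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 4096);
 *	...
 *	pthread_attr_t attr;
 *
 *	pthread_attr_init(&attr);
 *	pthread_attr_setstack(&attr, my_stack, K_THREAD_STACK_SIZEOF(my_stack));
 */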

static void posix_thread_recycle_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	posix_thread_recycle();
}
static K_WORK_DELAYABLE_DEFINE(posix_thread_recycle_work, posix_thread_recycle_work_handler);

static void posix_thread_finalize(struct posix_thread *t, void *retval)
{
	sys_snode_t *node_l;
	k_spinlock_key_t key;
	pthread_key_obj *key_obj;
	pthread_thread_data *thread_spec_data;

	SYS_SLIST_FOR_EACH_NODE(&t->key_list, node_l) {
		thread_spec_data = (pthread_thread_data *)node_l;
		if (thread_spec_data != NULL) {
			key_obj = thread_spec_data->key;
			if (key_obj->destructor != NULL) {
				(key_obj->destructor)(thread_spec_data->spec_data);
			}
		}
	}

	/* move thread from run_q to done_q */
	key = k_spin_lock(&pthread_pool_lock);
	sys_dlist_remove(&t->q_node);
	posix_thread_q_set(t, POSIX_THREAD_DONE_Q);
	t->retval = retval;
	k_spin_unlock(&pthread_pool_lock, key);

	/* trigger recycle work */
	(void)k_work_schedule(&posix_thread_recycle_work, K_MSEC(CONFIG_PTHREAD_RECYCLER_DELAY_MS));

	/* abort the underlying k_thread */
	k_thread_abort(&t->thread);
}

FUNC_NORETURN
static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3)
{
	int err;
	pthread_barrier_t barrier;
	void *(*fun_ptr)(void *arg) = arg2;
	struct posix_thread *t = CONTAINER_OF(k_current_get(), struct posix_thread, thread);

	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		/* cross the barrier so that pthread_create() can continue */
		barrier = POINTER_TO_UINT(arg3);
		err = pthread_barrier_wait(&barrier);
		__ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
	}

	posix_thread_finalize(t, fun_ptr(arg1));

	CODE_UNREACHABLE;
}

static void posix_thread_recycle(void)
{
	k_spinlock_key_t key;
	struct posix_thread *t;
	struct posix_thread *safe_t;
	sys_dlist_t recyclables = SYS_DLIST_STATIC_INIT(&recyclables);

	key = k_spin_lock(&pthread_pool_lock);
	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&posix_thread_q[POSIX_THREAD_DONE_Q], t, safe_t, q_node) {
		if (t->attr.detachstate == PTHREAD_CREATE_JOINABLE) {
			/* thread has not been joined yet */
			continue;
		}

		sys_dlist_remove(&t->q_node);
		sys_dlist_append(&recyclables, &t->q_node);
	}
	k_spin_unlock(&pthread_pool_lock, key);

	if (sys_dlist_is_empty(&recyclables)) {
		return;
	}

	LOG_DBG("Recycling %zu threads", sys_dlist_len(&recyclables));

	SYS_DLIST_FOR_EACH_CONTAINER(&recyclables, t, q_node) {
		if (t->attr.caller_destroys) {
			t->attr = (struct posix_thread_attr){0};
		} else {
			(void)pthread_attr_destroy((pthread_attr_t *)&t->attr);
		}
	}

	key = k_spin_lock(&pthread_pool_lock);
	while (!sys_dlist_is_empty(&recyclables)) {
		t = CONTAINER_OF(sys_dlist_get(&recyclables), struct posix_thread, q_node);
		posix_thread_q_set(t, POSIX_THREAD_READY_Q);
	}
	k_spin_unlock(&pthread_pool_lock, key);
}

/**
 * @brief Create a new thread.
 *
 * If attr is NULL, a default attributes object is initialized internally
 * (via pthread_attr_init()) and destroyed when the thread is recycled.
 *
 * See IEEE 1003.1
 */
int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadroutine)(void *),
		   void *arg)
{
	int err;
	pthread_barrier_t barrier;
	struct posix_thread *t = NULL;

	if (!(_attr == NULL || __attr_is_runnable((struct posix_thread_attr *)_attr))) {
		return EINVAL;
	}

	/* reclaim resources greedily */
	posix_thread_recycle();

	K_SPINLOCK(&pthread_pool_lock) {
		if (!sys_dlist_is_empty(&posix_thread_q[POSIX_THREAD_READY_Q])) {
			t = CONTAINER_OF(sys_dlist_get(&posix_thread_q[POSIX_THREAD_READY_Q]),
					 struct posix_thread, q_node);

			/* initialize thread state */
			posix_thread_q_set(t, POSIX_THREAD_RUN_Q);
			sys_slist_init(&t->key_list);
			sys_slist_init(&t->cleanup_list);
		}
	}

	if (t != NULL && IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		err = pthread_barrier_init(&barrier, NULL, 2);
		if (err != 0) {
			/* cannot allocate barrier. move thread back to ready_q */
			K_SPINLOCK(&pthread_pool_lock) {
				sys_dlist_remove(&t->q_node);
				posix_thread_q_set(t, POSIX_THREAD_READY_Q);
			}
			t = NULL;
		}
	}

	if (t == NULL) {
		/* no threads are ready */
		LOG_ERR("No threads are ready");
		return EAGAIN;
	}

	if (_attr == NULL) {
		err = pthread_attr_init((pthread_attr_t *)&t->attr);
		if (err == 0 && !__attr_is_runnable(&t->attr)) {
			(void)pthread_attr_destroy((pthread_attr_t *)&t->attr);
			err = EINVAL;
		}
		if (err != 0) {
			/* cannot allocate pthread attributes (e.g. stack) */
			K_SPINLOCK(&pthread_pool_lock) {
				sys_dlist_remove(&t->q_node);
				posix_thread_q_set(t, POSIX_THREAD_READY_Q);
			}
			return err;
		}
		/* caller not responsible for destroying attr */
		t->attr.caller_destroys = false;
	} else {
		/* copy user-provided attr into thread, caller must destroy attr at a later time */
		t->attr = *(struct posix_thread_attr *)_attr;
	}

	/* spawn the thread */
	k_thread_create(
		&t->thread, t->attr.stack, __get_attr_stacksize(&t->attr) + t->attr.guardsize,
		zephyr_thread_wrapper, (void *)arg, threadroutine,
		IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER) ? UINT_TO_POINTER(barrier) : NULL,
		posix_to_zephyr_priority(t->attr.priority, t->attr.schedpolicy), 0, K_NO_WAIT);

	if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
		/* wait for the spawned thread to cross our barrier */
		err = pthread_barrier_wait(&barrier);
		__ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
		err = pthread_barrier_destroy(&barrier);
		__ASSERT_NO_MSG(err == 0);
	}

	/* finally provide the initialized thread to the caller */
	*th = mark_pthread_obj_initialized(posix_thread_to_offset(t));

	LOG_DBG("Created pthread %p", &t->thread);

	return 0;
}
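
/*
 * Usage sketch (illustrative; worker is hypothetical):
 *
 *	void *worker(void *arg)
 *	{
 *		return arg;
 *	}
 *	...
 *	pthread_t tid;
 *	void *retval;
 *
 *	if (pthread_create(&tid, NULL, worker, NULL) == 0) {
 *		pthread_join(tid, &retval);
 *	}
 */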

int pthread_getconcurrency(void)
{
	int ret = 0;

	K_SPINLOCK(&pthread_pool_lock) {
		ret = pthread_concurrency;
	}

	return ret;
}

int pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}

	if (new_level > CONFIG_MP_MAX_NUM_CPUS) {
		return EAGAIN;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		pthread_concurrency = new_level;
	}

	return 0;
}

/**
 * @brief Set cancelability State.
 *
 * See IEEE 1003.1
 */
int pthread_setcancelstate(int state, int *oldstate)
{
	int ret = 0;
	struct posix_thread *t;
	bool cancel_pending = false;
	int cancel_type = PTHREAD_CANCEL_DEFERRED;

	if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE) {
		LOG_ERR("Invalid pthread state %d", state);
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			ret = EINVAL;
			K_SPINLOCK_BREAK;
		}

		if (oldstate != NULL) {
			*oldstate = t->attr.cancelstate;
		}

		t->attr.cancelstate = state;
		cancel_pending = t->attr.cancelpending;
		cancel_type = t->attr.canceltype;
	}

	if (ret == 0 && state == PTHREAD_CANCEL_ENABLE &&
	    cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS && cancel_pending) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}

	return ret;
}

/**
 * @brief Set cancelability Type.
 *
 * See IEEE 1003.1
 */
int pthread_setcanceltype(int type, int *oldtype)
{
	int ret = 0;
	struct posix_thread *t;

	if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS) {
		LOG_ERR("Invalid pthread cancel type %d", type);
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			ret = EINVAL;
			K_SPINLOCK_BREAK;
		}

		if (oldtype != NULL) {
			*oldtype = t->attr.canceltype;
		}
		t->attr.canceltype = type;
	}

	return ret;
}

/**
 * @brief Create a cancellation point in the calling thread.
 *
 * See IEEE 1003.1
 */
void pthread_testcancel(void)
{
	struct posix_thread *t;
	bool cancel_pended = false;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			K_SPINLOCK_BREAK;
		}
		if (t->attr.cancelstate != PTHREAD_CANCEL_ENABLE) {
			K_SPINLOCK_BREAK;
		}
		if (t->attr.cancelpending) {
			cancel_pended = true;
			t->attr.cancelstate = PTHREAD_CANCEL_DISABLE;
		}
	}

	if (cancel_pended) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}
}

/**
 * @brief Cancel execution of a thread.
 *
 * See IEEE 1003.1
 */
int pthread_cancel(pthread_t pthread)
{
	int ret = 0;
	int cancel_state = PTHREAD_CANCEL_ENABLE;
	int cancel_type = PTHREAD_CANCEL_DEFERRED;
	struct posix_thread *t = NULL;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		if (!__attr_is_initialized(&t->attr)) {
			/* thread has already terminated */
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		t->attr.cancelpending = true;
		cancel_state = t->attr.cancelstate;
		cancel_type = t->attr.canceltype;
	}

	if (ret == 0 && cancel_state == PTHREAD_CANCEL_ENABLE &&
	    cancel_type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		posix_thread_finalize(t, PTHREAD_CANCELED);
	}

	return ret;
}
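
/*
 * Note on semantics: with the default PTHREAD_CANCEL_DEFERRED type, this call
 * only records a pending cancellation; the target terminates the next time it
 * reaches a cancellation point such as pthread_testcancel(). Only targets with
 * PTHREAD_CANCEL_ASYNCHRONOUS are finalized immediately, as above.
 */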

/**
 * @brief Set thread scheduling policy and parameters.
 *
 * See IEEE 1003.1
 */
int pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param)
{
	int ret = 0;
	int new_prio = K_LOWEST_APPLICATION_THREAD_PRIO;
	struct posix_thread *t = NULL;

	if (param == NULL || !valid_posix_policy(policy) ||
	    !is_posix_policy_prio_valid(param->sched_priority, policy)) {
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		new_prio = posix_to_zephyr_priority(param->sched_priority, policy);
	}

	if (ret == 0) {
		k_thread_priority_set(&t->thread, new_prio);
	}

	return ret;
}

/**
 * @brief Initialise thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_init(pthread_attr_t *_attr)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	if (attr == NULL) {
		LOG_ERR("Invalid attr pointer");
		return ENOMEM;
	}

	BUILD_ASSERT(DYNAMIC_STACK_SIZE <= PTHREAD_STACK_MAX);

	*attr = (struct posix_thread_attr){0};
	attr->guardsize = CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT;

	if (DYNAMIC_STACK_SIZE > 0) {
		attr->stack = k_thread_stack_alloc(DYNAMIC_STACK_SIZE + attr->guardsize,
						   k_is_user_context() ? K_USER : 0);
		if (attr->stack == NULL) {
			LOG_DBG("Did not auto-allocate thread stack");
		} else {
			__set_attr_stacksize(attr, DYNAMIC_STACK_SIZE);
			__ASSERT_NO_MSG(__attr_is_initialized(attr));
			LOG_DBG("Allocated thread stack %zu@%p", __get_attr_stacksize(attr),
				attr->stack);
		}
	}

	/* caller responsible for destroying attr */
	attr->initialized = true;

	LOG_DBG("Initialized attr %p", _attr);

	return 0;
}

/**
 * @brief Get thread scheduling policy and parameters
 *
 * See IEEE 1003.1
 */
int pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
{
	int ret = 0;
	struct posix_thread *t;

	if (policy == NULL || param == NULL) {
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		if (!__attr_is_initialized(&t->attr)) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		param->sched_priority =
			zephyr_to_posix_priority(k_thread_priority_get(&t->thread), policy);
	}

	return ret;
}

/**
 * @brief Dynamic package initialization
 *
 * See IEEE 1003.1
 */
int pthread_once(pthread_once_t *once, void (*init_func)(void))
{
	__unused int ret;
	bool run_init_func = false;
	struct pthread_once *const _once = (struct pthread_once *)once;

	if (init_func == NULL) {
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		if (!_once->flag) {
			run_init_func = true;
			_once->flag = true;
		}
	}

	if (run_init_func) {
		init_func();
	}

	return 0;
}
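
/*
 * Usage sketch (illustrative; init_io and once are hypothetical): no matter
 * how many threads race through this call, init_io() runs exactly once:
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *
 *	static void init_io(void)
 *	{
 *		... one-time setup ...
 *	}
 *	...
 *	pthread_once(&once, init_io);
 */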

/**
 * @brief Terminate calling thread.
 *
 * See IEEE 1003.1
 */
FUNC_NORETURN
void pthread_exit(void *retval)
{
	struct posix_thread *self = NULL;

	K_SPINLOCK(&pthread_pool_lock) {
		self = to_posix_thread(pthread_self());
		if (self == NULL) {
			K_SPINLOCK_BREAK;
		}

		/* Mark a thread as cancellable before exiting */
		self->attr.cancelstate = PTHREAD_CANCEL_ENABLE;
	}

	if (self == NULL) {
		/* not a valid posix_thread */
		LOG_DBG("Aborting non-pthread %p", k_current_get());
		k_thread_abort(k_current_get());

		CODE_UNREACHABLE;
	}

	posix_thread_finalize(self, retval);
	CODE_UNREACHABLE;
}

/**
 * @brief Wait for a thread termination.
 *
 * See IEEE 1003.1
 */
int pthread_join(pthread_t pthread, void **status)
{
	int ret = 0;
	struct posix_thread *t = NULL;

	if (pthread == pthread_self()) {
		LOG_ERR("Pthread attempted to join itself (%x)", pthread);
		return EDEADLK;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		LOG_DBG("Pthread %p joining...", &t->thread);

		if (t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
			/* undefined behaviour */
			ret = EINVAL;
			K_SPINLOCK_BREAK;
		}

		if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		/*
		 * thread is joinable and is in run_q or done_q.
		 * let's ensure that the thread cannot be joined again after this point.
		 */
		t->attr.detachstate = PTHREAD_CREATE_DETACHED;
	}

	switch (ret) {
	case ESRCH:
		LOG_ERR("Pthread %p has already been joined", &t->thread);
		return ret;
	case EINVAL:
		LOG_ERR("Pthread %p is not joinable", &t->thread);
		return ret;
	case 0:
		break;
	}

	ret = k_thread_join(&t->thread, K_FOREVER);
	/* other possibilities? */
	__ASSERT_NO_MSG(ret == 0);

	LOG_DBG("Joined pthread %p", &t->thread);

	if (status != NULL) {
		LOG_DBG("Writing status to %p", status);
		*status = t->retval;
	}

	posix_thread_recycle();

	return 0;
}

/**
 * @brief Detach a thread.
 *
 * See IEEE 1003.1
 */
int pthread_detach(pthread_t pthread)
{
	int ret = 0;
	struct posix_thread *t;

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread);
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q ||
		    t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
			LOG_ERR("Pthread %p cannot be detached", &t->thread);
			ret = EINVAL;
			K_SPINLOCK_BREAK;
		}

		t->attr.detachstate = PTHREAD_CREATE_DETACHED;
	}

	if (ret == 0) {
		LOG_DBG("Pthread %p detached", &t->thread);
	}

	return ret;
}

/**
 * @brief Get detach state attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getdetachstate(const pthread_attr_t *_attr, int *detachstate)
{
	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || (detachstate == NULL)) {
		return EINVAL;
	}

	*detachstate = attr->detachstate;
	return 0;
}

/**
 * @brief Set detach state attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setdetachstate(pthread_attr_t *_attr, int detachstate)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || ((detachstate != PTHREAD_CREATE_DETACHED) &&
					     (detachstate != PTHREAD_CREATE_JOINABLE))) {
		return EINVAL;
	}

	attr->detachstate = detachstate;
	return 0;
}

/**
 * @brief Get scheduling policy attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getschedpolicy(const pthread_attr_t *_attr, int *policy)
{
	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || (policy == NULL)) {
		return EINVAL;
	}

	*policy = attr->schedpolicy;
	return 0;
}

/**
 * @brief Set scheduling policy attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setschedpolicy(pthread_attr_t *_attr, int policy)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || !valid_posix_policy(policy)) {
		return EINVAL;
	}

	attr->schedpolicy = policy;
	return 0;
}

/**
 * @brief Get stack size attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getstacksize(const pthread_attr_t *_attr, size_t *stacksize)
{
	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || (stacksize == NULL)) {
		return EINVAL;
	}

	*stacksize = __get_attr_stacksize(attr);
	return 0;
}

/**
 * @brief Set stack size attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setstacksize(pthread_attr_t *_attr, size_t stacksize)
{
	int ret;
	void *new_stack;
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || stacksize == 0 || stacksize < PTHREAD_STACK_MIN ||
	    stacksize > PTHREAD_STACK_MAX) {
		return EINVAL;
	}

	if (__get_attr_stacksize(attr) == stacksize) {
		return 0;
	}

	new_stack =
		k_thread_stack_alloc(stacksize + attr->guardsize, k_is_user_context() ? K_USER : 0);
	if (new_stack == NULL) {
		if (stacksize < __get_attr_stacksize(attr)) {
			__set_attr_stacksize(attr, stacksize);
			return 0;
		}

		LOG_DBG("k_thread_stack_alloc(%zu) failed", stacksize + attr->guardsize);
		return ENOMEM;
	}
	LOG_DBG("Allocated thread stack %zu@%p", stacksize + attr->guardsize, new_stack);

	if (attr->stack != NULL) {
		ret = k_thread_stack_free(attr->stack);
		if (ret == 0) {
			LOG_DBG("Freed attr %p thread stack %zu@%p", _attr,
				__get_attr_stacksize(attr), attr->stack);
		}
	}

	__set_attr_stacksize(attr, stacksize);
	attr->stack = new_stack;

	return 0;
}

/**
 * @brief Get stack attributes in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getstack(const pthread_attr_t *_attr, void **stackaddr, size_t *stacksize)
{
	const struct posix_thread_attr *attr = (const struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || (stackaddr == NULL) || (stacksize == NULL)) {
		return EINVAL;
	}

	*stackaddr = attr->stack;
	*stacksize = __get_attr_stacksize(attr);
	return 0;
}

int pthread_attr_getguardsize(const pthread_attr_t *ZRESTRICT _attr, size_t *ZRESTRICT guardsize)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || guardsize == NULL) {
		return EINVAL;
	}

	*guardsize = attr->guardsize;

	return 0;
}

int pthread_attr_setguardsize(pthread_attr_t *_attr, size_t guardsize)
{
	struct posix_thread_attr *const attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || guardsize > PTHREAD_GUARD_MAX) {
		return EINVAL;
	}

	attr->guardsize = guardsize;

	return 0;
}

/**
 * @brief Get thread attributes object scheduling parameters.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getschedparam(const pthread_attr_t *_attr, struct sched_param *schedparam)
{
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr) || (schedparam == NULL)) {
		return EINVAL;
	}

	schedparam->sched_priority = attr->priority;
	return 0;
}

/**
 * @brief Destroy thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_destroy(pthread_attr_t *_attr)
{
	int ret;
	struct posix_thread_attr *attr = (struct posix_thread_attr *)_attr;

	if (!__attr_is_initialized(attr)) {
		return EINVAL;
	}

	ret = k_thread_stack_free(attr->stack);
	if (ret == 0) {
		LOG_DBG("Freed attr %p thread stack %zu@%p", _attr, __get_attr_stacksize(attr),
			attr->stack);
	}

	*attr = (struct posix_thread_attr){0};
	LOG_DBG("Destroyed attr %p", _attr);

	return 0;
}

int pthread_setname_np(pthread_t thread, const char *name)
{
#ifdef CONFIG_THREAD_NAME
	k_tid_t kthread;

	thread = get_posix_thread_idx(thread);
	if (thread >= CONFIG_MAX_PTHREAD_COUNT) {
		return ESRCH;
	}

	kthread = &posix_thread_pool[thread].thread;

	if (name == NULL) {
		return EINVAL;
	}

	return k_thread_name_set(kthread, name);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(name);
	return 0;
#endif
}

int pthread_getname_np(pthread_t thread, char *name, size_t len)
{
#ifdef CONFIG_THREAD_NAME
	k_tid_t kthread;

	thread = get_posix_thread_idx(thread);
	if (thread >= CONFIG_MAX_PTHREAD_COUNT) {
		return ESRCH;
	}

	if (name == NULL) {
		return EINVAL;
	}

	memset(name, '\0', len);
	kthread = &posix_thread_pool[thread].thread;
	return k_thread_name_copy(kthread, name, len - 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(name);
	ARG_UNUSED(len);
	return 0;
#endif
}

int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
	ARG_UNUSED(prepare);
	ARG_UNUSED(parent);
	ARG_UNUSED(child);

	return ENOSYS;
}

/* this should probably go into signal.c but we need access to the lock */
int pthread_sigmask(int how, const sigset_t *ZRESTRICT set, sigset_t *ZRESTRICT oset)
{
	int ret = 0;
	struct posix_thread *t;

	if (!(how == SIG_BLOCK || how == SIG_SETMASK || how == SIG_UNBLOCK)) {
		return EINVAL;
	}

	K_SPINLOCK(&pthread_pool_lock) {
		t = to_posix_thread(pthread_self());
		if (t == NULL) {
			ret = ESRCH;
			K_SPINLOCK_BREAK;
		}

		if (oset != NULL) {
			*oset = t->sigset;
		}

		if (set == NULL) {
			K_SPINLOCK_BREAK;
		}

		switch (how) {
		case SIG_BLOCK:
			for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
				t->sigset.sig[i] |= set->sig[i];
			}
			break;
		case SIG_SETMASK:
			t->sigset = *set;
			break;
		case SIG_UNBLOCK:
			for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
				t->sigset.sig[i] &= ~set->sig[i];
			}
			break;
		}
	}

	return ret;
}
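
/*
 * Usage sketch (illustrative): to block SIGUSR1 for the calling thread while
 * saving the previous mask:
 *
 *	sigset_t set, oset;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	pthread_sigmask(SIG_BLOCK, &set, &oset);
 */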

static int posix_thread_pool_init(void)
{
	size_t i;

	for (i = 0; i < CONFIG_MAX_PTHREAD_COUNT; ++i) {
		posix_thread_q_set(&posix_thread_pool[i], POSIX_THREAD_READY_Q);
	}

	return 0;
}
SYS_INIT(posix_thread_pool_init, PRE_KERNEL_1, 0);