// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This module implements the pthread API on top of FreeRTOS. The API is implemented only to the
// level required by the libstdcxx threading framework, so not all standard pthread routines are supported.
//

#include <time.h>
#include <errno.h>
#include <pthread.h>
#include <string.h>
#include "esp_err.h"
#include "esp_attr.h"
#include "sys/queue.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "soc/soc_memory_layout.h"

#include "pthread_internal.h"
#include "esp_pthread.h"

#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
#include "esp_log.h"
const static char *TAG = "pthread";

/** task state */
enum esp_pthread_task_state {
    PTHREAD_TASK_STATE_RUN,
    PTHREAD_TASK_STATE_EXIT
};

/** pthread thread FreeRTOS wrapper */
typedef struct esp_pthread_entry {
    SLIST_ENTRY(esp_pthread_entry)  list_node;  ///< Tasks list node struct.
    TaskHandle_t                    handle;     ///< FreeRTOS task handle
    TaskHandle_t                    join_task;  ///< Handle of the task waiting to join
    enum esp_pthread_task_state     state;      ///< pthread task state
    bool                            detached;   ///< True if pthread is detached
    void                           *retval;     ///< Value supplied to calling thread during join
    void                           *task_arg;   ///< Task arguments
} esp_pthread_t;

/** pthread wrapper task arg */
typedef struct {
    void *(*func)(void *);  ///< user task entry
    void *arg;              ///< user task argument
    esp_pthread_cfg_t cfg;  ///< pthread configuration
} esp_pthread_task_arg_t;

/** pthread mutex FreeRTOS wrapper */
typedef struct {
    SemaphoreHandle_t   sem;    ///< FreeRTOS semaphore backing the mutex
    int                 type;   ///< Mutex type: PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_RECURSIVE or PTHREAD_MUTEX_ERRORCHECK
} esp_pthread_mutex_t;


static SemaphoreHandle_t s_threads_mux = NULL;
static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
static SLIST_HEAD(esp_thread_list_head, esp_pthread_entry) s_threads_list
                                        = SLIST_HEAD_INITIALIZER(s_threads_list);
static pthread_key_t s_pthread_cfg_key;


static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);

static void esp_pthread_cfg_key_destructor(void *value)
{
    free(value);
}

esp_err_t esp_pthread_init(void)
{
    if (pthread_key_create(&s_pthread_cfg_key, esp_pthread_cfg_key_destructor) != 0) {
        return ESP_ERR_NO_MEM;
    }
    s_threads_mux = xSemaphoreCreateMutex();
    if (s_threads_mux == NULL) {
        pthread_key_delete(s_pthread_cfg_key);
        return ESP_ERR_NO_MEM;
    }
    return ESP_OK;
}

static void *pthread_list_find_item(void *(*item_check)(esp_pthread_t *, void *arg), void *check_arg)
{
    esp_pthread_t *it;
    SLIST_FOREACH(it, &s_threads_list, list_node) {
        void *val = item_check(it, check_arg);
        if (val) {
            return val;
        }
    }
    return NULL;
}

static void *pthread_get_handle_by_desc(esp_pthread_t *item, void *desc)
{
    if (item == desc) {
        return item->handle;
    }
    return NULL;
}

static void *pthread_get_desc_by_handle(esp_pthread_t *item, void *hnd)
{
    if (hnd == item->handle) {
        return item;
    }
    return NULL;
}

static inline TaskHandle_t pthread_find_handle(pthread_t thread)
{
    return pthread_list_find_item(pthread_get_handle_by_desc, (void *)thread);
}

static esp_pthread_t *pthread_find(TaskHandle_t task_handle)
{
    return pthread_list_find_item(pthread_get_desc_by_handle, task_handle);
}

static void pthread_delete(esp_pthread_t *pthread)
{
    SLIST_REMOVE(&s_threads_list, pthread, esp_pthread_entry, list_node);
    free(pthread);
}

/* Sets the configuration (stack size, priority, core affinity, etc.) used for
   pthreads subsequently created by the calling thread */
esp_err_t esp_pthread_set_cfg(const esp_pthread_cfg_t *cfg)
{
    if (cfg->stack_size < PTHREAD_STACK_MIN) {
        return ESP_ERR_INVALID_ARG;
    }

    /* If a value is already set, update that value */
    esp_pthread_cfg_t *p = pthread_getspecific(s_pthread_cfg_key);
    if (!p) {
        p = malloc(sizeof(esp_pthread_cfg_t));
        if (!p) {
            return ESP_ERR_NO_MEM;
        }
    }
    *p = *cfg;
    pthread_setspecific(s_pthread_cfg_key, p);
    return ESP_OK;
}

esp_err_t esp_pthread_get_cfg(esp_pthread_cfg_t *p)
{
    esp_pthread_cfg_t *cfg = pthread_getspecific(s_pthread_cfg_key);
    if (cfg) {
        *p = *cfg;
        return ESP_OK;
    }
    memset(p, 0, sizeof(*p));
    return ESP_ERR_NOT_FOUND;
}

static int get_default_pthread_core(void)
{
    return CONFIG_PTHREAD_TASK_CORE_DEFAULT == -1 ? tskNO_AFFINITY : CONFIG_PTHREAD_TASK_CORE_DEFAULT;
}

esp_pthread_cfg_t esp_pthread_get_default_config(void)
{
    esp_pthread_cfg_t cfg = {
        .stack_size = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT,
        .prio = CONFIG_PTHREAD_TASK_PRIO_DEFAULT,
        .inherit_cfg = false,
        .thread_name = NULL,
        .pin_to_core = get_default_pthread_core()
    };

    return cfg;
}
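
/*
 * Usage sketch (illustrative only, not part of this module): an application can
 * take the default configuration, tweak it, and apply it with esp_pthread_set_cfg()
 * before calling pthread_create(). The example_* names below are hypothetical.
 */
#if 0
static void example_configure_defaults(void)
{
    esp_pthread_cfg_t cfg = esp_pthread_get_default_config();
    cfg.stack_size = 4096;          // bytes, must be >= PTHREAD_STACK_MIN
    cfg.prio = 5;                   // FreeRTOS priority, must be < configMAX_PRIORITIES
    cfg.thread_name = "worker";     // name given to the underlying FreeRTOS task
    cfg.inherit_cfg = true;         // threads created by the new thread inherit this config
    ESP_ERROR_CHECK(esp_pthread_set_cfg(&cfg));
}
#endif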

static void pthread_task_func(void *arg)
{
    void *rval = NULL;
    esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg;

    ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func);

    // wait for start
    xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);

    if (task_arg->cfg.inherit_cfg) {
        /* If the inherit option is set, call esp_pthread_set_cfg() ourselves so that
           threads created by this thread get the same configuration. thread_name is
           reset to NULL first so that the name is inherited as well, and so that we
           do not keep a dangling pointer to the name of a task that may already have
           been deleted by the time the configuration is used. */
        esp_pthread_cfg_t *cfg = &task_arg->cfg;
        cfg->thread_name = NULL;
        esp_pthread_set_cfg(cfg);
    }
    ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func);
    rval = task_arg->func(task_arg->arg);
    ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func);

    pthread_exit(rval);

    ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
}

int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
                   void *(*start_routine) (void *), void *arg)
{
    TaskHandle_t xHandle = NULL;

    ESP_LOGV(TAG, "%s", __FUNCTION__);
    esp_pthread_task_arg_t *task_arg = calloc(1, sizeof(esp_pthread_task_arg_t));
    if (task_arg == NULL) {
        ESP_LOGE(TAG, "Failed to allocate task args!");
        return ENOMEM;
    }

    esp_pthread_t *pthread = calloc(1, sizeof(esp_pthread_t));
    if (pthread == NULL) {
        ESP_LOGE(TAG, "Failed to allocate pthread data!");
        free(task_arg);
        return ENOMEM;
    }

    uint32_t stack_size = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT;
    BaseType_t prio = CONFIG_PTHREAD_TASK_PRIO_DEFAULT;
    BaseType_t core_id = get_default_pthread_core();
    const char *task_name = CONFIG_PTHREAD_TASK_NAME_DEFAULT;

    esp_pthread_cfg_t *pthread_cfg = pthread_getspecific(s_pthread_cfg_key);
    if (pthread_cfg) {
        if (pthread_cfg->stack_size) {
            stack_size = pthread_cfg->stack_size;
        }
        if (pthread_cfg->prio && pthread_cfg->prio < configMAX_PRIORITIES) {
            prio = pthread_cfg->prio;
        }

        if (pthread_cfg->inherit_cfg) {
            if (pthread_cfg->thread_name == NULL) {
                // Inherit task name from current task.
                task_name = pcTaskGetTaskName(NULL);
            } else {
                // Inheriting, but new task name.
                task_name = pthread_cfg->thread_name;
            }
        } else if (pthread_cfg->thread_name == NULL) {
            task_name = CONFIG_PTHREAD_TASK_NAME_DEFAULT;
        } else {
            task_name = pthread_cfg->thread_name;
        }

        if (pthread_cfg->pin_to_core >= 0 && pthread_cfg->pin_to_core < portNUM_PROCESSORS) {
            core_id = pthread_cfg->pin_to_core;
        }

        task_arg->cfg = *pthread_cfg;
    }

    if (attr) {
        /* Overwrite attributes */
        stack_size = attr->stacksize;

        switch (attr->detachstate) {
        case PTHREAD_CREATE_DETACHED:
            pthread->detached = true;
            break;
        case PTHREAD_CREATE_JOINABLE:
        default:
            pthread->detached = false;
        }
    }

    task_arg->func = start_routine;
    task_arg->arg = arg;
    pthread->task_arg = task_arg;
    BaseType_t res = xTaskCreatePinnedToCore(&pthread_task_func,
                                             task_name,
                                             // stack_size is in bytes; convert it to the number of
                                             // StackType_t words that FreeRTOS expects.
                                             // Note: ceil(m / n) == (m + n - 1) / n in integer arithmetic.
                                             (stack_size + sizeof(StackType_t) - 1) / sizeof(StackType_t),
                                             task_arg,
                                             prio,
                                             &xHandle,
                                             core_id);

    if (res != pdPASS) {
        ESP_LOGE(TAG, "Failed to create task!");
        free(pthread);
        free(task_arg);
        if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) {
            return ENOMEM;
        } else {
            return EAGAIN;
        }
    }
    pthread->handle = xHandle;

    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    SLIST_INSERT_HEAD(&s_threads_list, pthread, list_node);
    xSemaphoreGive(s_threads_mux);

    // start task
    xTaskNotify(xHandle, 0, eNoAction);

    *thread = (pthread_t)pthread; // the pointer value fits into pthread_t (uint32_t)

    ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle);

    return 0;
}

int pthread_join(pthread_t thread, void **retval)
{
    esp_pthread_t *pthread = (esp_pthread_t *)thread;
    int ret = 0;
    bool wait = false;
    void *child_task_retval = 0;

    ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread);

    // find task
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    TaskHandle_t handle = pthread_find_handle(thread);
    if (!handle) {
        // not found
        ret = ESRCH;
    } else if (pthread->detached) {
        // thread is detached
        ret = EDEADLK;
    } else if (pthread->join_task) {
        // another task is already waiting to join
        ret = EINVAL;
    } else if (handle == xTaskGetCurrentTaskHandle()) {
        // join to self is not allowed
        ret = EDEADLK;
    } else {
        esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle());
        if (cur_pthread && cur_pthread->join_task == handle) {
            // joining each other is not allowed
            ret = EDEADLK;
        } else {
            if (pthread->state == PTHREAD_TASK_STATE_RUN) {
                pthread->join_task = xTaskGetCurrentTaskHandle();
                wait = true;
            } else {
                child_task_retval = pthread->retval;
                pthread_delete(pthread);
            }
        }
    }
    xSemaphoreGive(s_threads_mux);

    if (ret == 0) {
        if (wait) {
            xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
            if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
                assert(false && "Failed to lock threads list!");
            }
            child_task_retval = pthread->retval;
            pthread_delete(pthread);
            xSemaphoreGive(s_threads_mux);
        }
        vTaskDelete(handle);
    }

    if (retval) {
        *retval = child_task_retval;
    }

    ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
    return ret;
}
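
/*
 * Usage sketch (illustrative only, not part of this module): the create/join/exit
 * subset implemented above is what higher-level wrappers such as libstdcxx's
 * std::thread rely on. The example_* names below are hypothetical.
 */
#if 0
static void *example_worker(void *arg)
{
    int *value = (int *)arg;
    *value += 1;        // do some work
    return arg;         // pointer handed back to pthread_join()
}

static void example_create_and_join(void)
{
    static int value = 41;
    pthread_t tid;
    void *retval = NULL;

    if (pthread_create(&tid, NULL, example_worker, &value) == 0) {
        pthread_join(tid, &retval);     // blocks until example_worker() returns
    }
}
#endif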

int pthread_detach(pthread_t thread)
{
    esp_pthread_t *pthread = (esp_pthread_t *)thread;
    int ret = 0;

    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    TaskHandle_t handle = pthread_find_handle(thread);
    if (!handle) {
        ret = ESRCH;
    } else if (pthread->detached) {
        // already detached
        ret = EINVAL;
    } else if (pthread->join_task) {
        // a task is already waiting to join
        ret = EINVAL;
    } else if (pthread->state == PTHREAD_TASK_STATE_RUN) {
        // pthread still running
        pthread->detached = true;
    } else {
        // pthread already stopped
        pthread_delete(pthread);
        vTaskDelete(handle);
    }
    xSemaphoreGive(s_threads_mux);
    ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
    return ret;
}

void pthread_exit(void *value_ptr)
{
    bool detached = false;
    /* preemptively clean up thread local storage, rather than
       waiting for the idle task to clean up the thread */
    pthread_internal_local_storage_destructor_callback();

    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
    if (!pthread) {
        assert(false && "Failed to find pthread for current task!");
    }
    if (pthread->task_arg) {
        free(pthread->task_arg);
    }
    if (pthread->detached) {
        // auto-free for detached threads
        pthread_delete(pthread);
        detached = true;
    } else {
        // set the return value for a future (or already waiting) joiner
        pthread->retval = value_ptr;
        if (pthread->join_task) {
            // notify the task blocked in pthread_join()
            xTaskNotify(pthread->join_task, 0, eNoAction);
        } else {
            // keep the entry in the list, marked as exited, until it is joined
            pthread->state = PTHREAD_TASK_STATE_EXIT;
        }
    }
    xSemaphoreGive(s_threads_mux);

    ESP_LOGD(TAG, "Task stk_wm = %d", uxTaskGetStackHighWaterMark(NULL));

    if (detached) {
        vTaskDelete(NULL);
    } else {
        vTaskSuspend(NULL);
    }

    // Should never be reached
    abort();
}

int pthread_cancel(pthread_t thread)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}

int sched_yield(void)
{
    vTaskDelay(0);
    return 0;
}

pthread_t pthread_self(void)
{
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
    if (!pthread) {
        assert(false && "Failed to find current thread ID!");
    }
    xSemaphoreGive(s_threads_mux);
    return (pthread_t)pthread;
}

int pthread_equal(pthread_t t1, pthread_t t2)
{
    return t1 == t2 ? 1 : 0;
}

/***************** ONCE ******************/
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) {
        ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__);
        return EINVAL;
    }

    uint32_t res = 1;
#if defined(CONFIG_SPIRAM)
    if (esp_ptr_external_ram(once_control)) {
        uxPortCompareSetExtram((uint32_t *) &once_control->init_executed, 0, &res);
    } else {
#endif
    uxPortCompareSet((uint32_t *) &once_control->init_executed, 0, &res);
#if defined(CONFIG_SPIRAM)
    }
#endif
    // Check if the compare-and-set was successful (init_executed was previously 0)
    if (res == 0) {
        ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control);
        init_routine();
    }

    return 0;
}
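
/*
 * Usage sketch (illustrative only, not part of this module): a statically
 * initialized pthread_once_t guarantees that the init routine runs exactly once,
 * even when several threads race into example_get_state(). The example_* names
 * are hypothetical.
 */
#if 0
static pthread_once_t s_example_once = PTHREAD_ONCE_INIT;
static int s_example_state;

static void example_init_once(void)
{
    s_example_state = 42;   // executed exactly once, by the first caller
}

static int example_get_state(void)
{
    pthread_once(&s_example_once, example_init_once);
    return s_example_state;
}
#endif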

/***************** MUTEX ******************/
static int mutexattr_check(const pthread_mutexattr_t *attr)
{
    if (attr->type != PTHREAD_MUTEX_NORMAL &&
        attr->type != PTHREAD_MUTEX_RECURSIVE &&
        attr->type != PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }
    return 0;
}

int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
    int type = PTHREAD_MUTEX_NORMAL;

    if (!mutex) {
        return EINVAL;
    }

    if (attr) {
        if (!attr->is_initialized) {
            return EINVAL;
        }
        int res = mutexattr_check(attr);
        if (res) {
            return res;
        }
        type = attr->type;
    }

    esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t));
    if (!mux) {
        return ENOMEM;
    }
    mux->type = type;

    if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
        mux->sem = xSemaphoreCreateRecursiveMutex();
    } else {
        mux->sem = xSemaphoreCreateMutex();
    }
    if (!mux->sem) {
        free(mux);
        return EAGAIN;
    }

    *mutex = (pthread_mutex_t)mux; // the pointer value fits into pthread_mutex_t (uint32_t)

    return 0;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    esp_pthread_mutex_t *mux;

    ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex);

    if (!mutex) {
        return EINVAL;
    }
    mux = (esp_pthread_mutex_t *)*mutex;
    if (!mux) {
        return EINVAL;
    }

    // check if mux is busy
    int res = pthread_mutex_lock_internal(mux, 0);
    if (res == EBUSY) {
        return EBUSY;
    }

    if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
        res = xSemaphoreGiveRecursive(mux->sem);
    } else {
        res = xSemaphoreGive(mux->sem);
    }
    if (res != pdTRUE) {
        assert(false && "Failed to release mutex!");
    }
    vSemaphoreDelete(mux->sem);
    free(mux);

    return 0;
}

static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
{
    if (!mux) {
        return EINVAL;
    }

    if ((mux->type == PTHREAD_MUTEX_ERRORCHECK) &&
        (xSemaphoreGetMutexHolder(mux->sem) == xTaskGetCurrentTaskHandle())) {
        return EDEADLK;
    }

    if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
        if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) {
            return EBUSY;
        }
    } else {
        if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) {
            return EBUSY;
        }
    }

    return 0;
}

static int pthread_mutex_init_if_static(pthread_mutex_t *mutex)
{
    int res = 0;
    if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
        portENTER_CRITICAL(&s_mutex_init_lock);
        if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
            res = pthread_mutex_init(mutex, NULL);
        }
        portEXIT_CRITICAL(&s_mutex_init_lock);
    }
    return res;
}
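
/*
 * Usage sketch (illustrative only, not part of this module): thanks to the lazy
 * initialization above, a statically initialized mutex can be locked directly;
 * the underlying FreeRTOS semaphore is created on first use. The example_* names
 * are hypothetical.
 */
#if 0
static pthread_mutex_t s_example_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned s_example_counter;

static void example_bump_counter(void)
{
    pthread_mutex_lock(&s_example_lock);    // first lock allocates the FreeRTOS mutex
    s_example_counter++;
    pthread_mutex_unlock(&s_example_lock);
}
#endif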

int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex)
{
    if (!mutex) {
        return EINVAL;
    }
    int res = pthread_mutex_init_if_static(mutex);
    if (res != 0) {
        return res;
    }
    return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY);
}

int IRAM_ATTR pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *timeout)
{
    if (!mutex) {
        return EINVAL;
    }
    int res = pthread_mutex_init_if_static(mutex);
    if (res != 0) {
        return res;
    }

    struct timespec currtime;
    clock_gettime(CLOCK_REALTIME, &currtime);
    // convert the absolute deadline into a relative FreeRTOS tick count
    TickType_t tmo = ((timeout->tv_sec - currtime.tv_sec)*1000 +
                      (timeout->tv_nsec - currtime.tv_nsec)/1000000)/portTICK_PERIOD_MS;

    res = pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, tmo);
    if (res == EBUSY) {
        return ETIMEDOUT;
    }
    return res;
}
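
/*
 * Usage sketch (illustrative only, not part of this module): pthread_mutex_timedlock()
 * takes an absolute CLOCK_REALTIME deadline, which the implementation above converts
 * into a relative tick timeout. The example_* names are hypothetical.
 */
#if 0
static int example_lock_with_timeout(pthread_mutex_t *mutex)
{
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 1;                           // give up after roughly one second

    int err = pthread_mutex_timedlock(mutex, &deadline);
    if (err == ETIMEDOUT) {
        // the mutex was still held when the deadline expired
    }
    return err;
}
#endif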

int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    if (!mutex) {
        return EINVAL;
    }
    int res = pthread_mutex_init_if_static(mutex);
    if (res != 0) {
        return res;
    }
    return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0);
}

int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    esp_pthread_mutex_t *mux;

    if (!mutex) {
        return EINVAL;
    }
    mux = (esp_pthread_mutex_t *)*mutex;
    if (!mux) {
        return EINVAL;
    }

    if (((mux->type == PTHREAD_MUTEX_RECURSIVE) ||
         (mux->type == PTHREAD_MUTEX_ERRORCHECK)) &&
        (xSemaphoreGetMutexHolder(mux->sem) != xTaskGetCurrentTaskHandle())) {
        return EPERM;
    }

    int ret;
    if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
        ret = xSemaphoreGiveRecursive(mux->sem);
    } else {
        ret = xSemaphoreGive(mux->sem);
    }
    if (ret != pdTRUE) {
        assert(false && "Failed to unlock mutex!");
    }
    return 0;
}

int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    if (!attr) {
        return EINVAL;
    }
    attr->type = PTHREAD_MUTEX_NORMAL;
    attr->is_initialized = 1;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    if (!attr) {
        return EINVAL;
    }
    attr->is_initialized = 0;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    if (!attr) {
        return EINVAL;
    }
    *type = attr->type;
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (!attr) {
        return EINVAL;
    }
    pthread_mutexattr_t tmp_attr = {.type = type};
    int res = mutexattr_check(&tmp_attr);
    if (!res) {
        attr->type = type;
    }
    return res;
}
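
/*
 * Usage sketch (illustrative only, not part of this module): creating a recursive
 * mutex through the attribute API; the same thread may then take the lock more than
 * once. The example_* names are hypothetical.
 */
#if 0
static void example_create_recursive_mutex(pthread_mutex_t *mutex)
{
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(mutex, &attr);   // backed by xSemaphoreCreateRecursiveMutex()
    pthread_mutexattr_destroy(&attr);

    pthread_mutex_lock(mutex);
    pthread_mutex_lock(mutex);          // nested lock is allowed for a recursive mutex
    pthread_mutex_unlock(mutex);
    pthread_mutex_unlock(mutex);
}
#endif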

/***************** ATTRIBUTES ******************/
int pthread_attr_init(pthread_attr_t *attr)
{
    if (attr) {
        /* Nothing to allocate. Set everything to default */
        attr->stacksize = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT;
        attr->detachstate = PTHREAD_CREATE_JOINABLE;
        return 0;
    }
    return EINVAL;
}

int pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr) {
        /* Nothing to deallocate. Reset everything to default */
        attr->stacksize = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT;
        attr->detachstate = PTHREAD_CREATE_JOINABLE;
        return 0;
    }
    return EINVAL;
}

int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr) {
        *stacksize = attr->stacksize;
        return 0;
    }
    return EINVAL;
}

int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if (attr && !(stacksize < PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return 0;
    }
    return EINVAL;
}

int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
    if (attr) {
        *detachstate = attr->detachstate;
        return 0;
    }
    return EINVAL;
}

int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
    if (attr) {
        switch (detachstate) {
        case PTHREAD_CREATE_DETACHED:
            attr->detachstate = PTHREAD_CREATE_DETACHED;
            break;
        case PTHREAD_CREATE_JOINABLE:
            attr->detachstate = PTHREAD_CREATE_JOINABLE;
            break;
        default:
            return EINVAL;
        }
        return 0;
    }
    return EINVAL;
}
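
/*
 * Usage sketch (illustrative only, not part of this module): using the attribute
 * API above to start a detached thread with a larger stack. Since it is detached,
 * its resources are released automatically in pthread_exit() rather than via
 * pthread_join(). The example_* names are hypothetical.
 */
#if 0
static void *example_background(void *arg)
{
    (void)arg;
    // ... long-running background work ...
    return NULL;
}

static int example_start_detached(void)
{
    pthread_attr_t attr;
    pthread_t tid;

    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 8192);                         // bytes
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

    int err = pthread_create(&tid, &attr, example_background, NULL);
    pthread_attr_destroy(&attr);
    return err;
}
#endif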

/* Hook function to force linking this file */
void pthread_include_pthread_impl(void)
{
}
