/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <sys/lock.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/reent.h>
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/task.h"
#include "freertos/portable.h"
#include "esp_rom_caps.h"

/* Notes on our newlib lock implementation:
 *
 * - Use FreeRTOS mutex semaphores as locks.
 * - _lock_t is int, but we store a SemaphoreHandle_t there.
 * - Locks are no-ops until the FreeRTOS scheduler is running.
 * - Due to this, locks need to be lazily initialised the first time
 *   they are acquired. Initialisation/deinitialisation of locks is
 *   protected by lock_init_spinlock.
 * - Race conditions around lazy initialisation (via lock_acquire) are
 *   protected against.
 * - Anyone calling lock_close is responsible for ensuring no one else
 *   is holding the lock at this time.
 * - Race conditions between lock_close & lock_init (for the same lock)
 *   are the responsibility of the caller.
 */
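
/* Illustrative usage sketch (names are hypothetical, not part of this file).
 * A zero-initialised _lock_t is lazily turned into a FreeRTOS mutex on
 * first acquisition:
 *
 *     static _lock_t s_my_lock;           // zero-initialised, no mutex yet
 *
 *     void my_critical_section(void)
 *     {
 *         _lock_acquire(&s_my_lock);      // creates the mutex on first use
 *         // ... access state shared between tasks ...
 *         _lock_release(&s_my_lock);
 *     }
 */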

static portMUX_TYPE lock_init_spinlock = portMUX_INITIALIZER_UNLOCKED;

/* Initialize the given lock by allocating a new mutex semaphore
   as the _lock_t value.

   Called by _lock_init*, also called by _lock_acquire* to lazily initialize
   locks that might have been initialised (to zero only) before the RTOS
   scheduler started.
*/
static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) {
    portENTER_CRITICAL(&lock_init_spinlock);
    if (*lock) {
        /* Lock already initialised (either we didn't check earlier,
           or it got initialised while we were waiting for the
           spinlock.) */
    } else {
        /* Create a new semaphore

           this is a bit of an API violation, as we're calling the
           private function xQueueCreateMutex(x) directly instead of
           the xSemaphoreCreateMutex / xSemaphoreCreateRecursiveMutex
           wrapper functions...

           The better alternative would be to pass pointers to one of
           the two xSemaphoreCreate___Mutex functions, but as FreeRTOS
           implements these as macros instead of inline functions
           (*party like it's 1998!*) it's not possible to do this
           without writing wrappers. Doing it this way seems much less
           spaghetti-like.
        */
        SemaphoreHandle_t new_sem = xQueueCreateMutex(mutex_type);
        if (!new_sem) {
            abort(); /* No more semaphores available or OOM */
        }
        *lock = (_lock_t)new_sem;
    }
    portEXIT_CRITICAL(&lock_init_spinlock);
}
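
/* Note: the spinlock plus the "if (*lock)" re-check above form a
 * double-checked initialisation. If two tasks race through _lock_acquire
 * on the same zero-initialised lock, both may call lock_init_generic, but
 * the spinlock serialises them and the re-check turns the second call into
 * a no-op, so only one mutex is ever created.
 */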

void IRAM_ATTR _lock_init(_lock_t *lock) {
    *lock = 0; // In case lock's memory is uninitialized
    lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_init_recursive(_lock_t *lock) {
    *lock = 0; // In case lock's memory is uninitialized
    lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* Free the mutex semaphore pointed to by *lock, and zero it out.

   Note that FreeRTOS doesn't account for deleting mutexes while they
   are held, and neither do we... so take care not to delete newlib
   locks while they may be held by other tasks!

   Also, deleting a lock in this way will cause it to be lazily
   re-initialised if it is used again. Caller has to avoid doing
   this!
*/
void IRAM_ATTR _lock_close(_lock_t *lock) {
    portENTER_CRITICAL(&lock_init_spinlock);
    if (*lock) {
        SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
#if (INCLUDE_xSemaphoreGetMutexHolder == 1)
        configASSERT(xSemaphoreGetMutexHolder(h) == NULL); /* mutex should not be held */
#endif
        vSemaphoreDelete(h);
        *lock = 0;
    }
    portEXIT_CRITICAL(&lock_init_spinlock);
}

void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close")));
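
/* Illustrative teardown sketch (hypothetical names): the caller must
 * guarantee that no other task can still be holding, or about to take,
 * the lock before closing it:
 *
 *     _lock_acquire(&s_obj_lock);     // ensure we are the last holder
 *     s_obj_in_use = false;           // detach the shared resource
 *     _lock_release(&s_obj_lock);
 *     // ... once no other task can touch s_obj_lock any more:
 *     _lock_close(&s_obj_lock);       // deletes the mutex, zeroes *lock
 */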

/* Acquire the mutex semaphore for lock. Wait up to delay ticks.
   mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/
static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type) {
    SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
    if (!h) {
        if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
            return 0; /* locking is a no-op before scheduler is up, so this "succeeds" */
        }
        /* Lazily initialise the lock - it might have had a static initializer (that we don't use) */
        lock_init_generic(lock, mutex_type);
        h = (SemaphoreHandle_t)(*lock);
        configASSERT(h != NULL);
    }

    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return 0; /* locking is a no-op before scheduler is up, so this "succeeds" */
    }
    BaseType_t success;
    if (!xPortCanYield()) {
        /* In ISR context */
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            abort(); /* recursive mutexes make no sense in ISR context */
        }
        BaseType_t higher_task_woken = false;
        success = xSemaphoreTakeFromISR(h, &higher_task_woken);
        if (!success && delay > 0) {
            abort(); /* Tried to block on a mutex from an ISR, couldn't... rewrite your program to avoid libc interactions in ISRs! */
        }
        if (higher_task_woken) {
            portYIELD_FROM_ISR();
        }
    } else {
        /* In task context */
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            success = xSemaphoreTakeRecursive(h, delay);
        } else {
            success = xSemaphoreTake(h, delay);
        }
    }

    return (success == pdTRUE) ? 0 : -1;
}
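
/* Sketch of how the wrappers below map onto lock_acquire_generic
 * (return value: 0 = acquired, -1 = not acquired):
 *
 *     _lock_acquire(&l);                  // delay = portMAX_DELAY, blocks
 *     if (_lock_try_acquire(&l) == 0) {   // delay = 0, non-blocking attempt
 *         // ... got the lock ...
 *         _lock_release(&l);
 *     }
 */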

void IRAM_ATTR _lock_acquire(_lock_t *lock) {
    lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock) {
    lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

int IRAM_ATTR _lock_try_acquire(_lock_t *lock) {
    return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX);
}

int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock) {
    return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* Release the mutex semaphore for lock.
   mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/
static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) {
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return; /* locking is a no-op before scheduler is up */
    }
    SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
    assert(h);

    if (!xPortCanYield()) {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            abort(); /* indicates a logic bug; it shouldn't be possible to lock recursively in an ISR */
        }
        BaseType_t higher_task_woken = false;
        xSemaphoreGiveFromISR(h, &higher_task_woken);
        if (higher_task_woken) {
            portYIELD_FROM_ISR();
        }
    } else {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            xSemaphoreGiveRecursive(h);
        } else {
            xSemaphoreGive(h);
        }
    }
}

void IRAM_ATTR _lock_release(_lock_t *lock) {
    lock_release_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_release_recursive(_lock_t *lock) {
    lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* To ease the transition to newlib 3.3.0, this part is kept under an ifdef.
 * After the toolchain with newlib 3.3.0 is released and merged, the ifdefs
 * can be removed.
 *
 * Also the retargetable locking functions still rely on the previous
 * implementation. Once support for !_RETARGETABLE_LOCKING is removed,
 * the code can be simplified, removing support for lazy initialization of
 * locks. At the same time, IDF code which relies on _lock_acquire/_lock_release
 * will have to be updated to not depend on lazy initialization.
 *
 * Explanation of the different lock types:
 *
 *  Newlib 2.2.0 and 3.0.0:
 *    _lock_t is defined as int, stores SemaphoreHandle_t.
 *
 *  Newlib 3.3.0:
 *    struct __lock is (or contains) StaticSemaphore_t.
 *    _LOCK_T is a pointer to struct __lock, equivalent to SemaphoreHandle_t.
 *    It has the same meaning as _lock_t in the previous implementation.
 */

/* This ensures the platform-specific definition in lock.h is correct.
 * We use "greater or equal" since the size of StaticSemaphore_t may
 * vary by 2 words, depending on whether configUSE_TRACE_FACILITY is enabled.
 */
_Static_assert(sizeof(struct __lock) >= sizeof(StaticSemaphore_t),
               "Incorrect size of struct __lock");

/* FreeRTOS configuration check */
_Static_assert(configSUPPORT_STATIC_ALLOCATION,
               "FreeRTOS should be configured with static allocation support");
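
/* For illustration only (an assumption about the platform headers, not a
 * definition from this file): the sys/lock.h used here is expected to
 * declare struct __lock with enough storage for a StaticSemaphore_t, so
 * that a _LOCK_T can double as a SemaphoreHandle_t:
 *
 *     struct __lock {
 *         int reserved[NN];  // NN chosen so sizeof >= sizeof(StaticSemaphore_t)
 *     };
 *     typedef struct __lock *_LOCK_T;
 */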

/* These 2 locks are used instead of 9 distinct newlib static locks,
 * as most of the locks are required for lesser-used features, so
 * the chance of performance degradation due to lock contention is low.
 */
static StaticSemaphore_t s_common_mutex;
static StaticSemaphore_t s_common_recursive_mutex;


#if ESP_ROM_HAS_RETARGETABLE_LOCKING
/* C3 and S3 ROMs are built without Newlib static lock symbols exported, and
 * with an extra level of _LOCK_T indirection in mind.
 * The following is a workaround for this:
 * - on startup, we call esp_rom_newlib_init_common_mutexes to set
 *   the two mutex pointers to magic values.
 * - in __retarget_lock_acquire*, we check if the argument dereferences
 *   to the magic value. If yes, we lock the correct mutex defined in the app
 *   instead.
 * Casts from &StaticSemaphore_t to _LOCK_T are okay because _LOCK_T
 * (which is SemaphoreHandle_t) is a pointer to the corresponding
 * StaticSemaphore_t structure. This is ensured by the asserts below.
 */

#define ROM_NEEDS_MUTEX_OVERRIDE
#endif // ESP_ROM_HAS_RETARGETABLE_LOCKING

#ifdef ROM_NEEDS_MUTEX_OVERRIDE
#define ROM_MUTEX_MAGIC  0xbb10c433
/* This is a macro, since we are overwriting the argument */
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead) \
    if (*(int*)_lock == ROM_MUTEX_MAGIC) { \
        (_lock) = (_LOCK_T) (_lock_to_use_instead); \
    }
#else  // ROM_NEEDS_MUTEX_OVERRIDE
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead)
#endif // ROM_NEEDS_MUTEX_OVERRIDE
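
/* Illustrative expansion of the override (sketch): when a ROM-provided lock
 * handle points at the magic value, the macro swaps in the app's common
 * mutex before the lock is used:
 *
 *     _LOCK_T lock = rom_lock;                     // hypothetical ROM lock
 *     MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
 *     // if *rom_lock held ROM_MUTEX_MAGIC, lock is now
 *     // (_LOCK_T) &s_common_mutex
 */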


void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
{
    *lock = NULL;  /* In case lock's memory is uninitialized */
    lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock)
{
    *lock = NULL;  /* In case lock's memory is uninitialized */
    lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

void IRAM_ATTR __retarget_lock_close(_LOCK_T lock)
{
    _lock_close(&lock);
}

void IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock)
{
    _lock_close_recursive(&lock);
}

/* Separate function, to prevent generating multiple assert strings */
static void IRAM_ATTR check_lock_nonzero(_LOCK_T lock)
{
    assert(lock != NULL && "Uninitialized lock used");
}

void IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
    _lock_acquire(&lock);
}

void IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
    _lock_acquire_recursive(&lock);
}

int IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
    return _lock_try_acquire(&lock);
}

int IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
    return _lock_try_acquire_recursive(&lock);
}

void IRAM_ATTR __retarget_lock_release(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    _lock_release(&lock);
}

void IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    _lock_release_recursive(&lock);
}

/* When _RETARGETABLE_LOCKING is enabled, newlib expects the following locks to be provided: */

extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sinit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___malloc_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___env_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sfp_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___atexit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___at_quick_exit_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___tz_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___dd_hash_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___arc4random_mutex;
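
/* Sketch of the newlib side of this contract (based on newlib's
 * retargetable-locking headers, not code in this file): newlib internals
 * take these locks via wrapper macros, e.g. malloc does roughly
 *
 *     __lock_acquire_recursive(__lock___malloc_recursive_mutex);
 *     // ... heap bookkeeping ...
 *     __lock_release_recursive(__lock___malloc_recursive_mutex);
 *
 * which, with _RETARGETABLE_LOCKING, expand to calls into the
 * __retarget_lock_* functions defined above.
 */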

void esp_newlib_locks_init(void)
{
    /* Initialize the two mutexes used for the locks above.
     * The asserts below check our assumption that SemaphoreHandle_t will
     * always point to the corresponding StaticSemaphore_t structure.
     */
    SemaphoreHandle_t handle;
    handle = xSemaphoreCreateMutexStatic(&s_common_mutex);
    assert(handle == (SemaphoreHandle_t) &s_common_mutex);
    handle = xSemaphoreCreateRecursiveMutexStatic(&s_common_recursive_mutex);
    assert(handle == (SemaphoreHandle_t) &s_common_recursive_mutex);
    (void) handle;

    /* Chip ROMs are built with older versions of newlib, and rely on different lock variables.
     * Initialize these locks to the same values.
     */
#ifdef CONFIG_IDF_TARGET_ESP32
    /* Newlib 2.2.0 is used in ROM, the following lock symbols are defined: */
    extern _lock_t __sfp_lock;
    __sfp_lock = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __sinit_lock;
    __sinit_lock = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __env_lock_object;
    __env_lock_object = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __tz_lock_object;
    __tz_lock_object = (_lock_t) &s_common_mutex;
#elif defined(CONFIG_IDF_TARGET_ESP32S2)
    /* Newlib 3.0.0 is used in ROM, the following lock symbols are defined: */
    extern _lock_t __sinit_recursive_mutex;
    __sinit_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __sfp_recursive_mutex;
    __sfp_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
#elif ESP_ROM_HAS_RETARGETABLE_LOCKING
    /* Newlib 3.3.0 is used in ROM, built with _RETARGETABLE_LOCKING.
     * The ROM's lock variables are not accessible, for the sake of ECO
     * forward compatibility; however, there is an API to initialize the
     * lock variables used in the ROM.
     */
    extern void esp_rom_newlib_init_common_mutexes(_LOCK_T, _LOCK_T);
    /* See the notes about ROM_NEEDS_MUTEX_OVERRIDE above */
    int magic_val = ROM_MUTEX_MAGIC;
    _LOCK_T magic_mutex = (_LOCK_T) &magic_val;
    esp_rom_newlib_init_common_mutexes(magic_mutex, magic_mutex);
#else // other target
#error Unsupported target
#endif
}
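
/* Illustrative usage (sketch; start_cpu0 is a hypothetical startup hook):
 * esp_newlib_locks_init() is expected to be called once, early in boot,
 * before any libc functionality that needs locking is used:
 *
 *     void start_cpu0(void)
 *     {
 *         esp_newlib_locks_init();
 *         // ... continue bringing up newlib, heap, the scheduler ...
 *     }
 */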