// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sys/lock.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/reent.h>
#include "esp_attr.h"
#include "soc/cpu.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/task.h"
#include "freertos/portable.h"
#include "esp_rom_caps.h"
#include "sdkconfig.h"   // CONFIG_IDF_TARGET_* macros used below

/* Notes on our newlib lock implementation:
 *
 * - Use FreeRTOS mutex semaphores as locks.
 * - lock_t is int, but we store an xSemaphoreHandle there.
 * - Locks are no-ops until the FreeRTOS scheduler is running.
 * - Due to this, locks need to be lazily initialised the first time
 *   they are acquired. Initialisation/deinitialisation of locks is
 *   protected by lock_init_spinlock.
 * - Race conditions around lazy initialisation (via lock_acquire) are
 *   protected against.
 * - Anyone calling lock_close is responsible for ensuring no one else
 *   is holding the lock at this time.
 * - Race conditions between lock_close & lock_init (for the same lock)
 *   are the responsibility of the caller.
 */

static portMUX_TYPE lock_init_spinlock = portMUX_INITIALIZER_UNLOCKED;
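
/* Illustrative usage sketch (not part of this file; the variable name is
 * hypothetical): newlib hands us a pointer to a _lock_t it declared, and the
 * functions below are expected to be usable roughly like this:
 *
 *     static _lock_t s_example_lock;             // zero means "not created yet"
 *     _lock_acquire_recursive(&s_example_lock);  // lazily creates the mutex
 *     ... critical section ...
 *     _lock_release_recursive(&s_example_lock);
 *     _lock_close_recursive(&s_example_lock);    // only once no task holds it
 */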

/* Initialize the given lock by allocating a new mutex semaphore
   as the _lock_t value.

   Called by _lock_init*, also called by _lock_acquire* to lazily initialize locks that might have
   been initialised (to zero only) before the RTOS scheduler started.
*/
static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) {
    portENTER_CRITICAL(&lock_init_spinlock);
    if (*lock) {
         /* Lock already initialised (either we didn't check earlier,
          or it got initialised while we were waiting for the
          spinlock.) */
    }
    else
    {
        /* Create a new semaphore

           this is a bit of an API violation, as we're calling the
           private function xQueueCreateMutex(x) directly instead of
           the xSemaphoreCreateMutex / xSemaphoreCreateRecursiveMutex
           wrapper functions...

           The better alternative would be to pass pointers to one of
           the two xSemaphoreCreate___Mutex functions, but as FreeRTOS
           implements these as macros instead of inline functions
           (*party like it's 1998!*) it's not possible to do this
           without writing wrappers. Doing it this way seems much less
           spaghetti-like.
        */
        xSemaphoreHandle new_sem = xQueueCreateMutex(mutex_type);
        if (!new_sem) {
            abort(); /* No more semaphores available or OOM */
        }
        *lock = (_lock_t)new_sem;
    }
    portEXIT_CRITICAL(&lock_init_spinlock);
}

void IRAM_ATTR _lock_init(_lock_t *lock) {
    *lock = 0; // In case lock's memory is uninitialized
    lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_init_recursive(_lock_t *lock) {
    *lock = 0; // In case lock's memory is uninitialized
    lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* Free the mutex semaphore pointed to by *lock, and zero it out.

   Note that FreeRTOS doesn't account for deleting mutexes while they
   are held, and neither do we... so take care not to delete newlib
   locks while they may be held by other tasks!

   Also, deleting a lock in this way will cause it to be lazily
   re-initialised if it is used again. Caller has to avoid doing
   this!
*/
void IRAM_ATTR _lock_close(_lock_t *lock) {
    portENTER_CRITICAL(&lock_init_spinlock);
    if (*lock) {
        xSemaphoreHandle h = (xSemaphoreHandle)(*lock);
#if (INCLUDE_xSemaphoreGetMutexHolder == 1)
        configASSERT(xSemaphoreGetMutexHolder(h) == NULL); /* mutex should not be held */
#endif
        vSemaphoreDelete(h);
        *lock = 0;
    }
    portEXIT_CRITICAL(&lock_init_spinlock);
}

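/* Closing a recursive lock is the same operation as closing an ordinary one
   (both just delete the underlying semaphore), so reuse _lock_close via an alias. */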
void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close")));

/* Acquire the mutex semaphore for lock. Wait up to delay ticks.
   mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX.

   Returns 0 on success (also when locking is a no-op because the scheduler
   is not yet running), -1 if the mutex could not be taken within delay ticks.
*/
static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type) {
    xSemaphoreHandle h = (xSemaphoreHandle)(*lock);
    if (!h) {
        if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
            return 0; /* locking is a no-op before scheduler is up, so this "succeeds" */
        }
        /* lazy initialise lock - might have had a static initializer (that we don't use) */
        lock_init_generic(lock, mutex_type);
        h = (xSemaphoreHandle)(*lock);
        configASSERT(h != NULL);
    }

    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return 0; /* locking is a no-op before scheduler is up, so this "succeeds" */
    }
    BaseType_t success;
    if (!xPortCanYield()) {
        /* In ISR Context */
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            abort(); /* recursive mutexes make no sense in ISR context */
        }
        BaseType_t higher_task_woken = false;
        success = xSemaphoreTakeFromISR(h, &higher_task_woken);
        if (!success && delay > 0) {
            abort(); /* Tried to block on mutex from ISR, couldn't... rewrite your program to avoid libc interactions in ISRs! */
        }
        if (higher_task_woken) {
            portYIELD_FROM_ISR();
        }
    }
    else {
        /* In task context */
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            success = xSemaphoreTakeRecursive(h, delay);
        } else {
            success = xSemaphoreTake(h, delay);
        }
    }

    return (success == pdTRUE) ? 0 : -1;
}

void IRAM_ATTR _lock_acquire(_lock_t *lock) {
    lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock) {
    lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

int IRAM_ATTR _lock_try_acquire(_lock_t *lock) {
    return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX);
}

int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock) {
    return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* Release the mutex semaphore for lock.
   mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/
static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) {
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return; /* locking is a no-op before scheduler is up */
    }
    xSemaphoreHandle h = (xSemaphoreHandle)(*lock);
    assert(h);

    if (!xPortCanYield()) {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            abort(); /* indicates logic bug, it shouldn't be possible to lock recursively in ISR */
        }
        BaseType_t higher_task_woken = false;
        xSemaphoreGiveFromISR(h, &higher_task_woken);
        if (higher_task_woken) {
            portYIELD_FROM_ISR();
        }
    } else {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            xSemaphoreGiveRecursive(h);
        } else {
            xSemaphoreGive(h);
        }
    }
}

void IRAM_ATTR _lock_release(_lock_t *lock) {
    lock_release_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_release_recursive(_lock_t *lock) {
    lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* To ease the transition to newlib 3.3.0, this part is kept under an ifdef.
 * After the toolchain with newlib 3.3.0 is released and merged, the ifdefs
 * can be removed.
 *
 * Also the retargetable locking functions still rely on the previous
 * implementation. Once support for !_RETARGETABLE_LOCKING is removed,
 * the code can be simplified, removing support for lazy initialization of
 * locks. At the same time, IDF code which relies on _lock_acquire/_lock_release
 * will have to be updated to not depend on lazy initialization.
 *
 * Explanation of the different lock types:
 *
 *  Newlib 2.2.0 and 3.0.0:
 *    _lock_t is defined as int, stores SemaphoreHandle_t.
 *
 *  Newlib 3.3.0:
 *    struct __lock is (or contains) StaticSemaphore_t
 *    _LOCK_T is a pointer to struct __lock, equivalent to SemaphoreHandle_t.
 *    It has the same meaning as _lock_t in the previous implementation.
 *
 */

/* This ensures the platform-specific definition in lock.h is correct.
 * We use "greater or equal" since the size of StaticSemaphore_t may
 * vary by 2 words, depending on whether configUSE_TRACE_FACILITY is enabled.
 */
_Static_assert(sizeof(struct __lock) >= sizeof(StaticSemaphore_t),
               "Incorrect size of struct __lock");
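
/* For reference, a sketch of the assumed shape of the platform definition
 * (the real one lives in the toolchain's sys/lock.h; the field name and exact
 * sizing below are illustrative assumptions only):
 *
 *     struct __lock {
 *         int reserved[...];   // at least sizeof(StaticSemaphore_t) bytes
 *     };
 *
 * The assert above only checks this size relation, which is what allows a
 * FreeRTOS StaticSemaphore_t to live inside a statically allocated
 * struct __lock object.
 */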

/* FreeRTOS configuration check */
_Static_assert(configSUPPORT_STATIC_ALLOCATION,
               "FreeRTOS should be configured with static allocation support");

/* These 2 locks are used instead of 9 distinct newlib static locks,
 * as most of the locks are required for lesser-used features, so
 * the chance of performance degradation due to lock contention is low.
 */
static StaticSemaphore_t s_common_mutex;
static StaticSemaphore_t s_common_recursive_mutex;


#if defined(CONFIG_IDF_TARGET_ESP32C3) || defined(CONFIG_IDF_TARGET_ESP32S3) || defined(CONFIG_IDF_TARGET_ESP32H2)
/* C3 and S3 ROMs are built without Newlib static lock symbols exported, and
 * with an extra level of _LOCK_T indirection in mind.
 * The following is a workaround for this:
 * - on startup, we call esp_rom_newlib_init_common_mutexes to set
 *   the two mutex pointers to magic values.
 * - in __retarget_lock_acquire*, we check whether the argument dereferences
 *   to the magic value. If yes, we lock the correct mutex defined in the app
 *   instead.
 * Casts from &StaticSemaphore_t to _LOCK_T are okay because _LOCK_T
 * (which is SemaphoreHandle_t) is a pointer to the corresponding
 * StaticSemaphore_t structure. This is ensured by asserts below.
 */

#define ROM_NEEDS_MUTEX_OVERRIDE
#endif // defined(CONFIG_IDF_TARGET_ESP32C3) || defined(CONFIG_IDF_TARGET_ESP32S3) || defined(CONFIG_IDF_TARGET_ESP32H2)

#ifdef ROM_NEEDS_MUTEX_OVERRIDE
#define ROM_MUTEX_MAGIC  0xbb10c433
/* This is a macro, since we are overwriting the argument */
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead) \
    if (*(int*)_lock == ROM_MUTEX_MAGIC) { \
        (_lock) = (_LOCK_T) (_lock_to_use_instead); \
    }
#else  // ROM_NEEDS_MUTEX_OVERRIDE
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead)
#endif // ROM_NEEDS_MUTEX_OVERRIDE
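
/* Expansion sketch: in __retarget_lock_acquire() below,
 *     MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
 * expands (when ROM_NEEDS_MUTEX_OVERRIDE is defined) to a check of
 * *(int*)lock against ROM_MUTEX_MAGIC and, on a match, redirects `lock`
 * to the application's s_common_mutex before taking it. On other targets
 * it expands to nothing.
 */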


void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
{
    *lock = NULL;  /* In case lock's memory is uninitialized */
    lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock)
{
    *lock = NULL;  /* In case lock's memory is uninitialized */
    lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

void IRAM_ATTR __retarget_lock_close(_LOCK_T lock)
{
    _lock_close(&lock);
}

void IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock)
{
    _lock_close_recursive(&lock);
}

/* Separate function, to prevent generating multiple assert strings */
static void IRAM_ATTR check_lock_nonzero(_LOCK_T lock)
{
    assert(lock != NULL && "Uninitialized lock used");
}

void IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
    _lock_acquire(&lock);
}

void IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
    _lock_acquire_recursive(&lock);
}

int IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
    return _lock_try_acquire(&lock);
}

int IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
    return _lock_try_acquire_recursive(&lock);
}

void IRAM_ATTR __retarget_lock_release(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    _lock_release(&lock);
}

void IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    _lock_release_recursive(&lock);
}

/* When _RETARGETABLE_LOCKING is enabled, newlib expects the following locks to be provided: */

extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sinit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___malloc_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___env_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sfp_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___atexit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___at_quick_exit_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___tz_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___dd_hash_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___arc4random_mutex;
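
/* Rough sketch of how newlib ends up using these (based on newlib's
 * _RETARGETABLE_LOCKING glue, not on anything defined in this file):
 * internal helpers such as __malloc_lock() do approximately
 *
 *     __retarget_lock_acquire_recursive(&__lock___malloc_recursive_mutex);
 *
 * so, through the aliases above, all nine static locks funnel into the two
 * common mutexes initialized in esp_newlib_locks_init() below.
 */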

void esp_newlib_locks_init(void)
{
    /* Initialize the two mutexes used for the locks above.
     * Asserts below check our assumption that SemaphoreHandle_t will always
     * point to the corresponding StaticSemaphore_t structure.
     */
    SemaphoreHandle_t handle;
    handle = xSemaphoreCreateMutexStatic(&s_common_mutex);
    assert(handle == (SemaphoreHandle_t) &s_common_mutex);
    handle = xSemaphoreCreateRecursiveMutexStatic(&s_common_recursive_mutex);
    assert(handle == (SemaphoreHandle_t) &s_common_recursive_mutex);
    (void) handle; /* suppress "unused variable" warning when asserts are disabled */

    /* Chip ROMs are built with older versions of newlib, and rely on different lock variables.
     * Initialize these locks to the same values.
     */
#ifdef CONFIG_IDF_TARGET_ESP32
    /* Newlib 2.2.0 is used in ROM, the following lock symbols are defined: */
    extern _lock_t __sfp_lock;
    __sfp_lock = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __sinit_lock;
    __sinit_lock = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __env_lock_object;
    __env_lock_object = (_lock_t) &s_common_mutex;
    extern _lock_t __tz_lock_object;
    __tz_lock_object = (_lock_t) &s_common_recursive_mutex;
#elif defined(CONFIG_IDF_TARGET_ESP32S2)
    /* Newlib 3.0.0 is used in ROM, the following lock symbols are defined: */
    extern _lock_t __sinit_recursive_mutex;
    __sinit_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __sfp_recursive_mutex;
    __sfp_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
#elif defined(CONFIG_IDF_TARGET_ESP32C3) || defined(CONFIG_IDF_TARGET_ESP32S3) || defined(CONFIG_IDF_TARGET_ESP32H2)
    /* Newlib 3.3.0 is used in ROM, built with _RETARGETABLE_LOCKING.
     * Lock variables are not accessible (for ECO forward compatibility reasons),
     * however there is an API to initialize the lock variables used in the ROM.
     */
    extern void esp_rom_newlib_init_common_mutexes(_LOCK_T, _LOCK_T);
    /* See notes about ROM_NEEDS_MUTEX_OVERRIDE above.
       magic_val must be static: the ROM keeps the pointer it is given here and
       dereferences it later, whenever ROM code takes one of its common locks. */
    static int magic_val = ROM_MUTEX_MAGIC;
    _LOCK_T magic_mutex = (_LOCK_T) &magic_val;
    esp_rom_newlib_init_common_mutexes(magic_mutex, magic_mutex);
#else // other target
#error Unsupported target
#endif
}
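
/* Note on intended usage (an assumption about the surrounding startup code,
 * not something enforced in this file): esp_newlib_locks_init() is expected
 * to run once during early startup, before the FreeRTOS scheduler starts and
 * before any libc call that needs these locks. */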