1 /*
2  * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include "pico/mutex.h"
8 #include "pico/time.h"
9 #include "pico/runtime_init.h"
10 
#if !PICO_RUNTIME_NO_INIT_MUTEX
// Initialize, at runtime startup, every mutex/recursive mutex that was placed
// in the linker section bounded by __mutex_array_start/__mutex_array_end
// (presumably populated by the SDK's static mutex declaration macros — confirm
// against the linker script). Weak so a platform can override it.
void __weak runtime_init_mutex(void) {
    // this is an array of either mutex_t or recursive_mutex_t (i.e. not necessarily the same size)
    // however each starts with a lock_core_t, and the spin_lock is initialized to address 1 for a recursive
    // spinlock and 0 for a regular one.

    // Walking the array requires both structs to be a multiple of 4 bytes and
    // to start with their lock_core_t member.
    static_assert(!(sizeof(mutex_t)&3), "");
    static_assert(!(sizeof(recursive_mutex_t)&3), "");
    static_assert(!offsetof(mutex_t, core), "");
    static_assert(!offsetof(recursive_mutex_t, core), "");
    // Symbols provided by the linker script delimiting the mutex array section.
    extern lock_core_t __mutex_array_start;
    extern lock_core_t __mutex_array_end;

    for (lock_core_t *l = &__mutex_array_start; l < &__mutex_array_end; ) {
        if (l->spin_lock) {
            assert(1 == (uintptr_t)l->spin_lock); // indicator for a recursive mutex
            recursive_mutex_t *rm = (recursive_mutex_t *)l;
            recursive_mutex_init(rm);
            l = &rm[1].core; // next
        } else {
            mutex_t *m = (mutex_t *)l;
            mutex_init(m);
            l = &m[1].core; // next
        }
    }
}
#endif
38 
// Register runtime_init_mutex with the runtime-init machinery at the ordering
// given by PICO_RUNTIME_INIT_MUTEX, unless the build opts out.
#if defined(PICO_RUNTIME_INIT_MUTEX) && !PICO_RUNTIME_SKIP_INIT_MUTEX
PICO_RUNTIME_INIT_FUNC_RUNTIME(runtime_init_mutex, PICO_RUNTIME_INIT_MUTEX);
#endif
42 
// Initialize a regular (non-recursive) mutex. Must be called before any other
// mutex_ function is used on mtx.
void mutex_init(mutex_t *mtx) {
    // Start unowned; the legacy flag (if compiled in) marks this as non-recursive.
    mtx->owner = LOCK_INVALID_OWNER_ID;
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
    mtx->recursive = false;
#endif
    // Attach one of the striped hardware spin locks to guard the mutex state.
    lock_init(&mtx->core, next_striped_spin_lock_num());
    // Ensure the initialized state is visible before others may use the mutex.
    __mem_fence_release();
}
51 
// Initialize a recursive mutex. Must be called before any other
// recursive_mutex_ function is used on mtx.
void recursive_mutex_init(recursive_mutex_t *mtx) {
    // Start unowned with no nested entries outstanding.
    mtx->owner = LOCK_INVALID_OWNER_ID;
    mtx->enter_count = 0;
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
    mtx->recursive = true;
#endif
    // Attach one of the striped hardware spin locks to guard the mutex state.
    lock_init(&mtx->core, next_striped_spin_lock_num());
    // Ensure the initialized state is visible before others may use the mutex.
    __mem_fence_release();
}
61 
__time_critical_func(mutex_enter_blocking)62 void __time_critical_func(mutex_enter_blocking)(mutex_t *mtx) {
63 #if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
64     if (mtx->recursive) {
65         recursive_mutex_enter_blocking(mtx);
66         return;
67     }
68 #endif
69     lock_owner_id_t caller = lock_get_caller_owner_id();
70     do {
71         uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
72         if (!lock_is_owner_id_valid(mtx->owner)) {
73             mtx->owner = caller;
74             spin_unlock(mtx->core.spin_lock, save);
75             break;
76         }
77         lock_internal_spin_unlock_with_wait(&mtx->core, save);
78     } while (true);
79 }
80 
// Acquire a recursive mutex, blocking until available. The current owner may
// re-enter; each successful call increments enter_count and must be balanced
// by a recursive_mutex_exit().
void __time_critical_func(recursive_mutex_enter_blocking)(recursive_mutex_t *mtx) {
    lock_owner_id_t caller = lock_get_caller_owner_id();
    do {
        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
        // Enter if the mutex is free, or if we already own it (re-entry).
        if (mtx->owner == caller || !lock_is_owner_id_valid(mtx->owner)) {
            mtx->owner = caller;
            uint __unused total = ++mtx->enter_count;
            spin_unlock(mtx->core.spin_lock, save);
            assert(total); // check for overflow
            return;
        } else {
            // Held by another owner: drop the spin lock and wait to be notified.
            lock_internal_spin_unlock_with_wait(&mtx->core, save);
        }
    } while (true);
}
96 
__time_critical_func(mutex_try_enter)97 bool __time_critical_func(mutex_try_enter)(mutex_t *mtx, uint32_t *owner_out) {
98 #if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
99     if (mtx->recursive) {
100         return recursive_mutex_try_enter(mtx, owner_out);
101     }
102 #endif
103     bool entered;
104     uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
105     if (!lock_is_owner_id_valid(mtx->owner)) {
106         mtx->owner = lock_get_caller_owner_id();
107         entered = true;
108     } else {
109         if (owner_out) *owner_out = (uint32_t) mtx->owner;
110         entered = false;
111     }
112     spin_unlock(mtx->core.spin_lock, save);
113     return entered;
114 }
115 
__time_critical_func(mutex_try_enter_block_until)116 bool __time_critical_func(mutex_try_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
117     // not using lock_owner_id_t to avoid backwards incompatibility change to mutex_try_enter API
118     static_assert(sizeof(lock_owner_id_t) <= 4, "");
119     uint32_t owner;
120     if (!mutex_try_enter(mtx, &owner)) {
121         if ((lock_owner_id_t)owner == lock_get_caller_owner_id()) return false; // deadlock, so we can never own it
122         return mutex_enter_block_until(mtx, until);
123     }
124     return true;
125 }
126 
// Attempt to acquire a recursive mutex without blocking. Succeeds if the
// mutex is free or already owned by the caller (re-entry), incrementing
// enter_count. On failure, *owner_out (if non-NULL) receives the current
// owner id. Returns true on acquisition.
bool __time_critical_func(recursive_mutex_try_enter)(recursive_mutex_t *mtx, uint32_t *owner_out) {
    bool entered;
    lock_owner_id_t caller = lock_get_caller_owner_id();
    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
    if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
        mtx->owner = caller;
        uint __unused total = ++mtx->enter_count;
        assert(total); // check for overflow
        entered = true;
    } else {
        // Held by another owner: report it if the caller asked.
        if (owner_out) *owner_out = (uint32_t) mtx->owner;
        entered = false;
    }
    spin_unlock(mtx->core.spin_lock, save);
    return entered;
}
143 
__time_critical_func(mutex_enter_timeout_ms)144 bool __time_critical_func(mutex_enter_timeout_ms)(mutex_t *mtx, uint32_t timeout_ms) {
145     return mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
146 }
147 
__time_critical_func(recursive_mutex_enter_timeout_ms)148 bool __time_critical_func(recursive_mutex_enter_timeout_ms)(recursive_mutex_t *mtx, uint32_t timeout_ms) {
149     return recursive_mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
150 }
151 
__time_critical_func(mutex_enter_timeout_us)152 bool __time_critical_func(mutex_enter_timeout_us)(mutex_t *mtx, uint32_t timeout_us) {
153     return mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
154 }
155 
__time_critical_func(recursive_mutex_enter_timeout_us)156 bool __time_critical_func(recursive_mutex_enter_timeout_us)(recursive_mutex_t *mtx, uint32_t timeout_us) {
157     return recursive_mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
158 }
159 
// Acquire mtx, blocking until it is obtained or the absolute deadline passes.
// Returns true if acquired, false on timeout.
bool __time_critical_func(mutex_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
    // Legacy mode: a mutex initialized as recursive is serviced by the
    // recursive implementation.
    if (mtx->recursive) {
        return recursive_mutex_enter_block_until(mtx, until);
    }
#endif
    assert(mtx->core.spin_lock); // catch use of an uninitialized mutex
    lock_owner_id_t caller = lock_get_caller_owner_id();
    do {
        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
        if (!lock_is_owner_id_valid(mtx->owner)) {
            // Unowned: claim it and release the protecting spin lock.
            mtx->owner = caller;
            spin_unlock(mtx->core.spin_lock, save);
            return true;
        } else {
            if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
                // timed out
                return false;
            }
            // not timed out; spin lock already unlocked, so loop again
        }
    } while (true);
}
183 
// Acquire a recursive mutex, blocking until it is obtained or the absolute
// deadline passes. The current owner may re-enter. Returns true if acquired
// (incrementing enter_count), false on timeout.
bool __time_critical_func(recursive_mutex_enter_block_until)(recursive_mutex_t *mtx, absolute_time_t until) {
    assert(mtx->core.spin_lock); // catch use of an uninitialized mutex
    lock_owner_id_t caller = lock_get_caller_owner_id();
    do {
        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
        // Enter if the mutex is free, or if we already own it (re-entry).
        if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
            mtx->owner = caller;
            uint __unused total = ++mtx->enter_count;
            spin_unlock(mtx->core.spin_lock, save);
            assert(total); // check for overflow
            return true;
        } else {
            if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
                // timed out
                return false;
            }
            // not timed out; spin lock already unlocked, so loop again
        }
    } while (true);
}
204 
// Release a (non-recursive) mutex held by the caller and notify any waiters.
void __time_critical_func(mutex_exit)(mutex_t *mtx) {
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
    // Legacy mode: a mutex initialized as recursive is serviced by the
    // recursive implementation.
    if (mtx->recursive) {
        recursive_mutex_exit(mtx);
        return;
    }
#endif
    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
    assert(lock_is_owner_id_valid(mtx->owner)); // exiting an unowned mutex is a bug
    mtx->owner = LOCK_INVALID_OWNER_ID;
    // Release the spin lock and wake anyone waiting on the mutex.
    lock_internal_spin_unlock_with_notify(&mtx->core, save);
}
217 
// Release one level of a recursive mutex. Ownership is relinquished (and a
// waiter notified) only when the outermost enter is balanced, i.e. when
// enter_count drops to zero.
void __time_critical_func(recursive_mutex_exit)(recursive_mutex_t *mtx) {
    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
    assert(lock_is_owner_id_valid(mtx->owner)); // must currently be owned
    assert(mtx->enter_count); // enters/exits must balance
    if (!--mtx->enter_count) {
        // Last nested exit: drop ownership and wake a waiter.
        mtx->owner = LOCK_INVALID_OWNER_ID;
        lock_internal_spin_unlock_with_notify(&mtx->core, save);
    } else {
        // Still held recursively; just release the protecting spin lock.
        spin_unlock(mtx->core.spin_lock, save);
    }
}