1 /*
2 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include "pico/mutex.h"
8 #include "pico/time.h"
9
// Initialize a (non-recursive) mutex: unowned, backed by the next striped HW spin lock.
void mutex_init(mutex_t *mtx) {
    lock_init(&mtx->core, next_striped_spin_lock_num());
    mtx->owner = LOCK_INVALID_OWNER_ID; // no current owner
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
    mtx->recursive = false; // compat flag so enter/exit can dispatch to the recursive variants
#endif
    __mem_fence_release(); // publish the initialized fields before other cores can observe the mutex
}
18
// Initialize a recursive mutex: unowned, zero nesting depth, backed by a striped HW spin lock.
void recursive_mutex_init(recursive_mutex_t *mtx) {
    lock_init(&mtx->core, next_striped_spin_lock_num());
    mtx->owner = LOCK_INVALID_OWNER_ID; // no current owner
    mtx->enter_count = 0;               // nesting depth; owner releases when it returns to 0
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
    mtx->recursive = true; // compat flag so plain mutex_* calls dispatch here
#endif
    __mem_fence_release(); // publish the initialized fields before other cores can observe the mutex
}
28
__time_critical_func(mutex_enter_blocking)29 void __time_critical_func(mutex_enter_blocking)(mutex_t *mtx) {
30 #if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
31 if (mtx->recursive) {
32 recursive_mutex_enter_blocking(mtx);
33 return;
34 }
35 #endif
36 lock_owner_id_t caller = lock_get_caller_owner_id();
37 do {
38 uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
39 if (!lock_is_owner_id_valid(mtx->owner)) {
40 mtx->owner = caller;
41 spin_unlock(mtx->core.spin_lock, save);
42 break;
43 }
44 lock_internal_spin_unlock_with_wait(&mtx->core, save);
45 } while (true);
46 }
47
__time_critical_func(recursive_mutex_enter_blocking)48 void __time_critical_func(recursive_mutex_enter_blocking)(recursive_mutex_t *mtx) {
49 lock_owner_id_t caller = lock_get_caller_owner_id();
50 do {
51 uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
52 if (mtx->owner == caller || !lock_is_owner_id_valid(mtx->owner)) {
53 mtx->owner = caller;
54 uint __unused total = ++mtx->enter_count;
55 spin_unlock(mtx->core.spin_lock, save);
56 assert(total); // check for overflow
57 return;
58 } else {
59 lock_internal_spin_unlock_with_wait(&mtx->core, save);
60 }
61 } while (true);
62 }
63
__time_critical_func(mutex_try_enter)64 bool __time_critical_func(mutex_try_enter)(mutex_t *mtx, uint32_t *owner_out) {
65 #if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
66 if (mtx->recursive) {
67 return recursive_mutex_try_enter(mtx, owner_out);
68 }
69 #endif
70 bool entered;
71 uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
72 if (!lock_is_owner_id_valid(mtx->owner)) {
73 mtx->owner = lock_get_caller_owner_id();
74 entered = true;
75 } else {
76 if (owner_out) *owner_out = (uint32_t) mtx->owner;
77 entered = false;
78 }
79 spin_unlock(mtx->core.spin_lock, save);
80 return entered;
81 }
82
__time_critical_func(mutex_try_enter_block_until)83 bool __time_critical_func(mutex_try_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
84 // not using lock_owner_id_t to avoid backwards incompatibility change to mutex_try_enter API
85 static_assert(sizeof(lock_owner_id_t) <= 4, "");
86 uint32_t owner;
87 if (!mutex_try_enter(mtx, &owner)) {
88 if ((lock_owner_id_t)owner == lock_get_caller_owner_id()) return false; // deadlock, so we can never own it
89 return mutex_enter_block_until(mtx, until);
90 }
91 return true;
92 }
93
__time_critical_func(recursive_mutex_try_enter)94 bool __time_critical_func(recursive_mutex_try_enter)(recursive_mutex_t *mtx, uint32_t *owner_out) {
95 bool entered;
96 lock_owner_id_t caller = lock_get_caller_owner_id();
97 uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
98 if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
99 mtx->owner = caller;
100 uint __unused total = ++mtx->enter_count;
101 assert(total); // check for overflow
102 entered = true;
103 } else {
104 if (owner_out) *owner_out = (uint32_t) mtx->owner;
105 entered = false;
106 }
107 spin_unlock(mtx->core.spin_lock, save);
108 return entered;
109 }
110
__time_critical_func(mutex_enter_timeout_ms)111 bool __time_critical_func(mutex_enter_timeout_ms)(mutex_t *mtx, uint32_t timeout_ms) {
112 return mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
113 }
114
__time_critical_func(recursive_mutex_enter_timeout_ms)115 bool __time_critical_func(recursive_mutex_enter_timeout_ms)(recursive_mutex_t *mtx, uint32_t timeout_ms) {
116 return recursive_mutex_enter_block_until(mtx, make_timeout_time_ms(timeout_ms));
117 }
118
__time_critical_func(mutex_enter_timeout_us)119 bool __time_critical_func(mutex_enter_timeout_us)(mutex_t *mtx, uint32_t timeout_us) {
120 return mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
121 }
122
__time_critical_func(recursive_mutex_enter_timeout_us)123 bool __time_critical_func(recursive_mutex_enter_timeout_us)(recursive_mutex_t *mtx, uint32_t timeout_us) {
124 return recursive_mutex_enter_block_until(mtx, make_timeout_time_us(timeout_us));
125 }
126
__time_critical_func(mutex_enter_block_until)127 bool __time_critical_func(mutex_enter_block_until)(mutex_t *mtx, absolute_time_t until) {
128 #if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
129 if (mtx->recursive) {
130 return recursive_mutex_enter_block_until(mtx, until);
131 }
132 #endif
133 assert(mtx->core.spin_lock);
134 lock_owner_id_t caller = lock_get_caller_owner_id();
135 do {
136 uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
137 if (!lock_is_owner_id_valid(mtx->owner)) {
138 mtx->owner = caller;
139 spin_unlock(mtx->core.spin_lock, save);
140 return true;
141 } else {
142 if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
143 // timed out
144 return false;
145 }
146 // not timed out; spin lock already unlocked, so loop again
147 }
148 } while (true);
149 }
150
__time_critical_func(recursive_mutex_enter_block_until)151 bool __time_critical_func(recursive_mutex_enter_block_until)(recursive_mutex_t *mtx, absolute_time_t until) {
152 assert(mtx->core.spin_lock);
153 lock_owner_id_t caller = lock_get_caller_owner_id();
154 do {
155 uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
156 if (!lock_is_owner_id_valid(mtx->owner) || mtx->owner == caller) {
157 mtx->owner = caller;
158 uint __unused total = ++mtx->enter_count;
159 spin_unlock(mtx->core.spin_lock, save);
160 assert(total); // check for overflow
161 return true;
162 } else {
163 if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&mtx->core, save, until)) {
164 // timed out
165 return false;
166 }
167 // not timed out; spin lock already unlocked, so loop again
168 }
169 } while (true);
170 }
171
// Release the (non-recursive) mutex and wake any waiters.
// The caller must currently hold the mutex (checked by assert in debug builds).
void __time_critical_func(mutex_exit)(mutex_t *mtx) {
#if PICO_MUTEX_ENABLE_SDK120_COMPATIBILITY
    if (mtx->recursive) {
        recursive_mutex_exit(mtx);
        return;
    }
#endif
    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
    assert(lock_is_owner_id_valid(mtx->owner)); // must be held when released
    mtx->owner = LOCK_INVALID_OWNER_ID;
    lock_internal_spin_unlock_with_notify(&mtx->core, save); // release spin lock and notify waiters
}
184
__time_critical_func(recursive_mutex_exit)185 void __time_critical_func(recursive_mutex_exit)(recursive_mutex_t *mtx) {
186 uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
187 assert(lock_is_owner_id_valid(mtx->owner));
188 assert(mtx->enter_count);
189 if (!--mtx->enter_count) {
190 mtx->owner = LOCK_INVALID_OWNER_ID;
191 lock_internal_spin_unlock_with_notify(&mtx->core, save);
192 } else {
193 spin_unlock(mtx->core.spin_lock, save);
194 }
195 }