/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _HARDWARE_SYNC_H
#define _HARDWARE_SYNC_H

#include "pico.h"
#include "hardware/address_mapped.h"
#include "hardware/regs/sio.h"

#ifdef __cplusplus
extern "C" {
#endif

/** \file hardware/sync.h
 * \defgroup hardware_sync hardware_sync
 *
 * Low level hardware spin locks, barrier and processor event APIs
 *
 * Spin Locks
 * ----------
 *
 * The RP2040 provides 32 hardware spin locks, which can be used to manage mutually-exclusive access to shared software
 * and hardware resources.
 *
 * Generally each spin lock itself is a shared resource,
 * i.e. the same hardware spin lock can be used by multiple higher level primitives (as long as the spin locks are neither held for long periods, nor
 * held concurrently with other spin locks by the same core - which could lead to deadlock). A hardware spin lock that is exclusively owned can be used
 * individually with more flexibility and without regard to other software. Note that no hardware spin lock may
 * be acquired re-entrantly (i.e. hardware spin locks are not on their own safe for use by both thread code and IRQs); however, the default spin lock
 * methods here (e.g. \ref spin_lock_blocking) always disable interrupts while the lock is held, as use by both IRQ handlers and user code is
 * common/desirable, and spin locks are only expected to be held for brief periods.
 *
 * The SDK uses the following default spin lock assignments, classifying which spin locks are reserved for exclusive/special purposes
 * vs those suitable for more general shared use:
 *
 * Number (ID) | Description
 * :---------: | -----------
 * 0-13 | Currently reserved for exclusive use by the SDK and other libraries. If you use these spin locks, you risk breaking SDK or other library functionality. Each reserved spin lock used individually has its own PICO_SPINLOCK_ID so you can search for those.
 * 14,15 | (\ref PICO_SPINLOCK_ID_OS1 and \ref PICO_SPINLOCK_ID_OS2). Currently reserved for exclusive use by an operating system (or other system level software) co-existing with the SDK.
 * 16-23 | (\ref PICO_SPINLOCK_ID_STRIPED_FIRST - \ref PICO_SPINLOCK_ID_STRIPED_LAST). Spin locks from this range are assigned in a round-robin fashion via \ref next_striped_spin_lock_num(). These spin locks are shared, but assigning numbers from a range reduces the probability that two higher level locking primitives using _striped_ spin locks will actually be using the same spin lock.
 * 24-31 | (\ref PICO_SPINLOCK_ID_CLAIM_FREE_FIRST - \ref PICO_SPINLOCK_ID_CLAIM_FREE_LAST). These are reserved for exclusive use and are allocated on a first come first served basis at runtime via \ref spin_lock_claim_unused()
 */
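
// Example (illustrative sketch, not part of this header): guarding a shared
// counter with a dynamically claimed spin lock. The names counter_lock and
// shared_counter are hypothetical.
//
//     #include "hardware/sync.h"
//
//     static spin_lock_t *counter_lock;
//     static volatile uint32_t shared_counter;
//
//     void counter_setup(void) {
//         // panics if no spin lock is free (required == true)
//         int lock_num = spin_lock_claim_unused(true);
//         counter_lock = spin_lock_init((uint) lock_num);
//     }
//
//     void counter_increment(void) {
//         // disables IRQs, then spins until the lock is acquired
//         uint32_t saved_irq = spin_lock_blocking(counter_lock);
//         shared_counter++;                      // keep the critical section brief
//         spin_unlock(counter_lock, saved_irq);  // releases lock, restores IRQs
//     }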

// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_SYNC, Enable/disable assertions in the HW sync module, type=bool, default=0, group=hardware_sync
#ifndef PARAM_ASSERTIONS_ENABLED_SYNC
#define PARAM_ASSERTIONS_ENABLED_SYNC 0
#endif

/** \brief A spin lock identifier
 * \ingroup hardware_sync
 */
typedef volatile uint32_t spin_lock_t;

// PICO_CONFIG: PICO_SPINLOCK_ID_IRQ, Spinlock ID for IRQ protection, min=0, max=31, default=9, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_IRQ
#define PICO_SPINLOCK_ID_IRQ 9
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_TIMER, Spinlock ID for Timer protection, min=0, max=31, default=10, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_TIMER
#define PICO_SPINLOCK_ID_TIMER 10
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_HARDWARE_CLAIM, Spinlock ID for Hardware claim protection, min=0, max=31, default=11, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_HARDWARE_CLAIM
#define PICO_SPINLOCK_ID_HARDWARE_CLAIM 11
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_RAND, Spinlock ID for Random Number Generator, min=0, max=31, default=12, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_RAND
#define PICO_SPINLOCK_ID_RAND 12
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS1, First Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=14, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS1
#define PICO_SPINLOCK_ID_OS1 14
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS2, Second Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=15, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS2
#define PICO_SPINLOCK_ID_OS2 15
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_FIRST, Lowest Spinlock ID in the 'striped' range, min=0, max=31, default=16, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_FIRST
#define PICO_SPINLOCK_ID_STRIPED_FIRST 16
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_LAST, Highest Spinlock ID in the 'striped' range, min=0, max=31, default=23, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_LAST
#define PICO_SPINLOCK_ID_STRIPED_LAST 23
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_FIRST, Lowest Spinlock ID in the 'claim free' range, min=0, max=31, default=24, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_FIRST
#define PICO_SPINLOCK_ID_CLAIM_FREE_FIRST 24
#endif

#ifdef PICO_SPINLOCK_ID_CLAIM_FREE_END
#warning PICO_SPINLOCK_ID_CLAIM_FREE_END has been renamed to PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_LAST, Highest Spinlock ID in the 'claim free' range, min=0, max=31, default=31, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#define PICO_SPINLOCK_ID_CLAIM_FREE_LAST 31
#endif

/*! \brief Insert a SEV instruction into the code path.
 * \ingroup hardware_sync
 *
 * The SEV (send event) instruction sends an event to both cores.
 */
__force_inline static void __sev(void) {
    __asm volatile ("sev");
}

/*! \brief Insert a WFE instruction into the code path.
 * \ingroup hardware_sync
 *
 * The WFE (wait for event) instruction waits until one of a number of
 * events occurs, including events signalled by the SEV instruction on either core.
 */
__force_inline static void __wfe(void) {
    __asm volatile ("wfe");
}
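
// Example (illustrative sketch): core 1 sleeping until core 0 signals it.
// go_flag is a hypothetical shared variable; WFE can also wake for other
// reasons, hence the re-check loop.
//
//     static volatile bool go_flag;
//
//     void core1_wait(void) {
//         while (!go_flag) {
//             __wfe();    // low-power wait for an event
//         }
//     }
//
//     void core0_signal(void) {
//         go_flag = true;
//         __dmb();        // make the flag visible before signalling
//         __sev();        // wake the other core
//     }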

/*! \brief Insert a WFI instruction into the code path.
 * \ingroup hardware_sync
 *
 * The WFI (wait for interrupt) instruction waits for an interrupt to wake up the core.
 */
__force_inline static void __wfi(void) {
    __asm volatile ("wfi");
}

/*! \brief Insert a DMB instruction into the code path.
 * \ingroup hardware_sync
 *
 * The DMB (data memory barrier) acts as a memory barrier; all memory accesses prior to this
 * instruction will be observed before any explicit access after the instruction.
 */
__force_inline static void __dmb(void) {
    __asm volatile ("dmb" : : : "memory");
}

/*! \brief Insert a DSB instruction into the code path.
 * \ingroup hardware_sync
 *
 * The DSB (data synchronization barrier) acts as a special kind of data
 * memory barrier (DMB). The DSB operation completes when all explicit memory
 * accesses before this instruction complete.
 */
__force_inline static void __dsb(void) {
    __asm volatile ("dsb" : : : "memory");
}

/*! \brief Insert an ISB instruction into the code path.
 * \ingroup hardware_sync
 *
 * ISB acts as an instruction synchronization barrier. It flushes the pipeline of the processor,
 * so that all instructions following the ISB are fetched from cache or memory again, after
 * the ISB instruction has completed.
 */
__force_inline static void __isb(void) {
    __asm volatile ("isb");
}

/*! \brief Acquire a memory fence
 * \ingroup hardware_sync
 */
__force_inline static void __mem_fence_acquire(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_acquire);
//#else
//    std::atomic_thread_fence(std::memory_order_acquire);
//#endif
}

/*! \brief Release a memory fence
 * \ingroup hardware_sync
 */
__force_inline static void __mem_fence_release(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_release);
//#else
//    std::atomic_thread_fence(std::memory_order_release);
//#endif
}
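
// Example (illustrative sketch): publishing data from one core to another
// using the release/acquire pair above. The variables message and
// message_ready are hypothetical.
//
//     static uint32_t message;
//     static volatile bool message_ready;
//
//     void producer(void) {
//         message = 42;
//         __mem_fence_release();   // message is visible before the flag
//         message_ready = true;
//     }
//
//     void consumer(void) {
//         while (!message_ready) tight_loop_contents();
//         __mem_fence_acquire();   // flag is read before the message
//         // safe to read message here
//     }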

/*! \brief Save and disable interrupts
 * \ingroup hardware_sync
 *
 * \return The prior interrupt enable status for restoration later via restore_interrupts()
 */
__force_inline static uint32_t save_and_disable_interrupts(void) {
    uint32_t status;
    __asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
    __asm volatile ("cpsid i");
    return status;
}

/*! \brief Restore interrupts to a specified state
 * \ingroup hardware_sync
 *
 * \param status Previous interrupt status from save_and_disable_interrupts()
 */
__force_inline static void restore_interrupts(uint32_t status) {
    __asm volatile ("msr PRIMASK,%0"::"r" (status) : );
}
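
// Example (illustrative sketch): a short single-core critical section. This
// protects only against IRQ handlers on the current core; use a spin lock if
// the other core can also touch the shared state.
//
//     uint32_t saved = save_and_disable_interrupts();
//     // ... briefly touch state shared with IRQ handlers on this core ...
//     restore_interrupts(saved);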

/*! \brief Get HW Spinlock instance from number
 * \ingroup hardware_sync
 *
 * \param lock_num Spinlock ID
 * \return The spinlock instance
 */
__force_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
    invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
    return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
}

/*! \brief Get HW Spinlock number from instance
 * \ingroup hardware_sync
 *
 * \param lock The Spinlock instance
 * \return The Spinlock ID
 */
__force_inline static uint spin_lock_get_num(spin_lock_t *lock) {
    invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                            (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                            ((uint) lock - (SIO_BASE + SIO_SPINLOCK0_OFFSET)) % sizeof(spin_lock_t) != 0);
    return (uint) (lock - (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET));
}
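
// Example (illustrative sketch): the two helpers above are inverses, so for
// any valid ID the round trip returns the same number.
//
//     uint num = PICO_SPINLOCK_ID_STRIPED_FIRST;
//     spin_lock_t *lock = spin_lock_instance(num);
//     assert(spin_lock_get_num(lock) == num);   // assumes <assert.h>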

/*! \brief Acquire a spin lock without disabling interrupts (hence unsafe)
 * \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 */
__force_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
    // Note we don't do a WFE or anything, because by convention these spin locks are very short lived, never block,
    // and run with interrupts disabled (to ensure that); therefore nothing on our core could be blocking us, so we
    // just need to wait on another core, which should be finished soon
    while (__builtin_expect(!*lock, 0));
    __mem_fence_acquire();
}

/*! \brief Release a spin lock without re-enabling interrupts
 * \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 */
__force_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
    __mem_fence_release();
    *lock = 0;
}

/*! \brief Acquire a spin lock safely
 * \ingroup hardware_sync
 *
 * This function will disable interrupts prior to acquiring the spinlock
 *
 * \param lock Spinlock instance
 * \return interrupt status to be used when unlocking, to restore to original state
 */
__force_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
    uint32_t save = save_and_disable_interrupts();
    spin_lock_unsafe_blocking(lock);
    return save;
}

/*! \brief Check to see if a spinlock is currently acquired elsewhere.
 * \ingroup hardware_sync
 *
 * \param lock Spinlock instance
 * \return true if the spinlock is currently locked
 */
inline static bool is_spin_locked(spin_lock_t *lock) {
    check_hw_size(spin_lock_t, 4);
    uint lock_num = spin_lock_get_num(lock);
    return 0 != (*(io_ro_32 *) (SIO_BASE + SIO_SPINLOCK_ST_OFFSET) & (1u << lock_num));
}

/*! \brief Release a spin lock safely
 * \ingroup hardware_sync
 *
 * This function will re-enable interrupts according to the \p saved_irq parameter.
 *
 * \param lock Spinlock instance
 * \param saved_irq Return value from the \ref spin_lock_blocking() function.
 *
 * \sa spin_lock_blocking()
 */
__force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
    spin_unlock_unsafe(lock);
    restore_interrupts(saved_irq);
}

/*! \brief Initialise a spin lock
 * \ingroup hardware_sync
 *
 * The spin lock is initially unlocked
 *
 * \param lock_num The spin lock number
 * \return The spin lock instance
 */
spin_lock_t *spin_lock_init(uint lock_num);

/*! \brief Release all spin locks
 * \ingroup hardware_sync
 */
void spin_locks_reset(void);

/*! \brief Return a spin lock number from the _striped_ range
 * \ingroup hardware_sync
 *
 * Returns a spin lock number in the range PICO_SPINLOCK_ID_STRIPED_FIRST to PICO_SPINLOCK_ID_STRIPED_LAST
 * in a round robin fashion. This does not grant the caller exclusive access to the spin lock, so the caller
 * must (see the sketch after this list):
 *
 * -# Abide (with other callers) by the contract of only holding this spin lock briefly (and with IRQs disabled - the default via \ref spin_lock_blocking()),
 *    and not whilst holding other spin locks.
 * -# Be OK with the resulting contention with other possible users of the spin lock (brief, due to the above requirement).
 *
 * \return lock_num a spin lock number the caller may use (non exclusively)
 * \see PICO_SPINLOCK_ID_STRIPED_FIRST
 * \see PICO_SPINLOCK_ID_STRIPED_LAST
 */
uint next_striped_spin_lock_num(void);
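
// Example (illustrative sketch): a hypothetical higher level primitive backed
// by a striped spin lock. The lock is shared with other striped users, so it
// is only ever held briefly.
//
//     typedef struct {
//         spin_lock_t *lock;
//         uint32_t value;
//     } guarded_u32_t;
//
//     void guarded_u32_init(guarded_u32_t *g, uint32_t initial) {
//         g->lock = spin_lock_instance(next_striped_spin_lock_num());
//         g->value = initial;
//     }
//
//     uint32_t guarded_u32_add(guarded_u32_t *g, uint32_t delta) {
//         uint32_t saved = spin_lock_blocking(g->lock);
//         uint32_t result = g->value += delta;   // brief critical section
//         spin_unlock(g->lock, saved);
//         return result;
//     }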

/*! \brief Mark a spin lock as used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware. Will cause a panic if the spin lock
 * is already claimed. Use of this method by libraries detects accidental
 * configurations that would fail in unpredictable ways.
 *
 * \param lock_num the spin lock number
 */
void spin_lock_claim(uint lock_num);

/*! \brief Mark multiple spin locks as used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware. Will cause a panic if any of the spin locks
 * are already claimed. Use of this method by libraries detects accidental
 * configurations that would fail in unpredictable ways.
 *
 * \param lock_num_mask Bitfield of all required spin locks to claim (bit 0 == spin lock 0, bit 1 == spin lock 1 etc)
 */
void spin_lock_claim_mask(uint32_t lock_num_mask);

/*! \brief Mark a spin lock as no longer used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware.
 *
 * \param lock_num the spin lock number to release
 */
void spin_lock_unclaim(uint lock_num);

/*! \brief Claim a free spin lock
 * \ingroup hardware_sync
 *
 * \param required if true the function will panic if none are available
 * \return the spin lock number, or -1 if \p required was false and none were free
 */
int spin_lock_claim_unused(bool required);

/*! \brief Determine if a spin lock is claimed
 * \ingroup hardware_sync
 *
 * \param lock_num the spin lock number
 * \return true if claimed, false otherwise
 * \see spin_lock_claim
 * \see spin_lock_claim_mask
 */
bool spin_lock_is_claimed(uint lock_num);
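
// Example (illustrative sketch): claiming a spin lock at runtime without
// panicking when none are free, by passing required == false and checking
// for -1.
//
//     int num = spin_lock_claim_unused(false);
//     if (num < 0) {
//         // no spin lock free in the claim-free range; fall back or report
//     } else {
//         spin_lock_t *lock = spin_lock_instance((uint) num);
//         // ... use lock ...
//         spin_lock_unclaim((uint) num);   // release the claim when done
//     }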

#define remove_volatile_cast(t, x) ({__mem_fence_acquire(); (t)(x); })

#ifdef __cplusplus
}
#endif

#endif