/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _HARDWARE_SYNC_H
#define _HARDWARE_SYNC_H

#include "pico.h"
#include "hardware/address_mapped.h"

#ifdef __riscv
#include "hardware/hazard3.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

/** \file hardware/sync.h
 * \defgroup hardware_sync hardware_sync
 *
 * \brief Low level hardware spin locks, barrier and processor event APIs
 *
 * Spin Locks
 * ----------
 *
 * The RP-series microcontrollers provide 32 hardware spin locks, which can be used to manage mutually-exclusive access to shared software
 * and hardware resources.
 *
 * Generally each spin lock itself is a shared resource,
 * i.e. the same hardware spin lock can be used by multiple higher level primitives (as long as the spin locks are neither held for long periods, nor
 * held concurrently with other spin locks by the same core - which could lead to deadlock). A hardware spin lock that is exclusively owned can be used
 * individually with more flexibility and without regard to other software. Note that no hardware spin lock may
 * be acquired re-entrantly (i.e. hardware spin locks are not on their own safe for use by both thread code and IRQs); however, the default spin lock related
 * methods here (e.g. \ref spin_lock_blocking) always disable interrupts while the lock is held, as use by IRQ handlers and user code is common/desirable,
 * and spin locks are only expected to be held for brief periods.
 *
 * \if rp2350_specific
 * RP2350 Warning. Due to erratum RP2350-E2, writes to new SIO registers above an offset of +0x180 alias the spinlocks, causing spurious lock releases.
 * This SDK by default uses atomic memory accesses to implement the hardware_sync_spin_lock API, as a workaround on RP2350 A2.
 * \endif
 *
 * The SDK uses the following default spin lock assignments, classifying which spin locks are reserved for exclusive/special purposes
 * vs those suitable for more general shared use:
 *
 * Number (ID) | Description
 * :---------: | -----------
 * 0-13 | Currently reserved for exclusive use by the SDK and other libraries. If you use these spin locks, you risk breaking SDK or other library functionality. Each reserved spin lock used individually has its own PICO_SPINLOCK_ID so you can search for those.
 * 14,15 | (\ref PICO_SPINLOCK_ID_OS1 and \ref PICO_SPINLOCK_ID_OS2). Currently reserved for exclusive use by an operating system (or other system level software) co-existing with the SDK.
 * 16-23 | (\ref PICO_SPINLOCK_ID_STRIPED_FIRST - \ref PICO_SPINLOCK_ID_STRIPED_LAST). Spin locks from this range are assigned in a round-robin fashion via \ref next_striped_spin_lock_num(). These spin locks are shared, but assigning numbers from a range reduces the probability that two higher level locking primitives using _striped_ spin locks will actually be using the same spin lock.
 * 24-31 | (\ref PICO_SPINLOCK_ID_CLAIM_FREE_FIRST - \ref PICO_SPINLOCK_ID_CLAIM_FREE_LAST). These are reserved for exclusive use and are allocated on a first come first served basis at runtime via \ref spin_lock_claim_unused()
 */
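
// Example usage (an illustrative sketch, not part of this header's API): claiming a free spin
// lock at startup and using it to guard a brief critical section shared between thread code and
// an IRQ handler. spin_lock_claim_unused(), spin_lock_instance(), spin_lock_blocking() and
// spin_unlock() are the SDK spin lock helpers declared below / in hardware/sync/spin_lock.h;
// the names my_lock/my_init/my_update are hypothetical.
//
//     static spin_lock_t *my_lock;
//
//     void my_init(void) {
//         // panics if no spin lock in the claim-free range (24-31) is available
//         my_lock = spin_lock_instance((uint) spin_lock_claim_unused(true));
//     }
//
//     void my_update(void) {
//         uint32_t save = spin_lock_blocking(my_lock); // IRQs disabled while the lock is held
//         // ... brief critical section ...
//         spin_unlock(my_lock, save);                  // releases the lock and restores IRQ state
//     }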

// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_HARDWARE_SYNC, Enable/disable assertions in the hardware_sync module, type=bool, default=0, group=hardware_sync
#ifndef PARAM_ASSERTIONS_ENABLED_HARDWARE_SYNC
#ifdef PARAM_ASSERTIONS_ENABLED_SYNC // backwards compatibility with SDK < 2.0.0
#define PARAM_ASSERTIONS_ENABLED_HARDWARE_SYNC PARAM_ASSERTIONS_ENABLED_SYNC
#else
#define PARAM_ASSERTIONS_ENABLED_HARDWARE_SYNC 0
#endif
#endif

/*! \brief Insert a NOP instruction into the code path.
 * \ingroup hardware_sync
 *
 * NOP does nothing for one cycle. On RP2350 Arm binaries this is forced to be
 * a 32-bit instruction to avoid dual-issue of NOPs.
 */
__force_inline static void __nop(void) {
#if !__ARM_ARCH_6M__
#ifdef __riscv
    __asm volatile ("nop");
#else
    __asm volatile ("nop.w");
#endif
#else
    __asm volatile ("nop");
#endif
}

/*! \brief Insert a SEV instruction into the code path.
 * \ingroup hardware_sync
 *
 * The SEV (send event) instruction sends an event to both cores.
 */
#if !__has_builtin(__sev)
__force_inline static void __sev(void) {
#ifdef __riscv
    __hazard3_unblock();
#else
    pico_default_asm_volatile ("sev");
#endif
}
#endif

/*! \brief Insert a WFE instruction into the code path.
 * \ingroup hardware_sync
 *
 * The WFE (wait for event) instruction waits until one of a number of
 * events occurs, including events signalled by the SEV instruction on either core.
 */
#if !__has_builtin(__wfe)
__force_inline static void __wfe(void) {
#ifdef __riscv
    __hazard3_block();
#else
    pico_default_asm_volatile ("wfe");
#endif
}
#endif
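
// Example usage (an illustrative sketch): one core waits for a flag set by the other core,
// sleeping via __wfe() between checks instead of busy-polling; the other core sets the flag and
// issues __sev() to wake it. WFE may also return for other events, so the flag is re-checked in
// a loop. The variable name work_ready is hypothetical.
//
//     static volatile bool work_ready;
//
//     // waiting core
//     while (!work_ready) {
//         __wfe();
//     }
//
//     // signalling core
//     work_ready = true;
//     __sev();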

/*! \brief Insert a WFI instruction into the code path.
 * \ingroup hardware_sync
 *
 * The WFI (wait for interrupt) instruction waits for an interrupt to wake up the core.
 */
#if !__has_builtin(__wfi)
__force_inline static void __wfi(void) {
    pico_default_asm_volatile("wfi");
}
#endif

/*! \brief Insert a DMB instruction into the code path.
 * \ingroup hardware_sync
 *
 * The DMB (data memory barrier) acts as a memory barrier: all memory accesses prior to this
 * instruction will be observed before any explicit memory access that follows it.
 */
__force_inline static void __dmb(void) {
#ifdef __riscv
    __asm volatile ("fence rw, rw" : : : "memory");
#else
    pico_default_asm_volatile ("dmb" : : : "memory");
#endif
}

/*! \brief Insert a DSB instruction into the code path.
 * \ingroup hardware_sync
 *
 * The DSB (data synchronization barrier) acts as a special kind of data
 * memory barrier (DMB). The DSB operation completes when all explicit memory
 * accesses before this instruction complete.
 */
__force_inline static void __dsb(void) {
#ifdef __riscv
    __asm volatile ("fence rw, rw" : : : "memory");
#else
    pico_default_asm_volatile ("dsb" : : : "memory");
#endif
}

/*! \brief Insert an ISB instruction into the code path.
 * \ingroup hardware_sync
 *
 * ISB acts as an instruction synchronization barrier. It flushes the pipeline of the processor,
 * so that all instructions following the ISB are fetched from cache or memory again, after
 * the ISB instruction has completed.
 */
__force_inline static void __isb(void) {
#ifdef __riscv
    __asm volatile ("fence.i" : : : "memory");
#else
    pico_default_asm_volatile("isb" ::: "memory");
#endif
}

/*! \brief Acquire a memory fence
 * \ingroup hardware_sync
 */
__force_inline static void __mem_fence_acquire(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_acquire);
//#else
//    std::atomic_thread_fence(std::memory_order_acquire);
//#endif
}

/*! \brief Release a memory fence
 * \ingroup hardware_sync
 *
 */
__force_inline static void __mem_fence_release(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_release);
//#else
//    std::atomic_thread_fence(std::memory_order_release);
//#endif
}
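
// Example usage (an illustrative sketch): passing a block of data from one core to the other via
// a shared buffer plus a "ready" flag. The release fence orders the data writes before the flag
// write; the acquire fence orders the flag read before the data reads. The names shared_data and
// data_ready are hypothetical; tight_loop_contents() is the SDK's busy-wait hint from pico.h.
//
//     static uint32_t shared_data[4];
//     static volatile bool data_ready;
//
//     // producer core
//     shared_data[0] = 123;
//     __mem_fence_release();
//     data_ready = true;
//
//     // consumer core
//     while (!data_ready) tight_loop_contents();
//     __mem_fence_acquire();
//     uint32_t value = shared_data[0];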

/*! \brief Save and disable interrupts
 * \ingroup hardware_sync
 *
 * \return The prior interrupt enable status for restoration later via restore_interrupts()
 */
__force_inline static uint32_t save_and_disable_interrupts(void) {
    uint32_t status;
#ifdef __riscv
    pico_default_asm_volatile (
        "csrrci %0, mstatus, 0x8\n"
        : "=r" (status) :: "memory"
    );
#else
    pico_default_asm_volatile (
        "mrs %0, PRIMASK\n"
        "cpsid i"
        : "=r" (status) :: "memory");
#endif
    return status;
}

/*! \brief Restore interrupts to a specified state
 * \ingroup hardware_sync
 *
 * \param status Previous interrupt status from save_and_disable_interrupts()
 */
__force_inline static void restore_interrupts(uint32_t status) {
#ifdef __riscv
    __compiler_memory_barrier();
    if (status & 0x8) {
        riscv_set_csr(mstatus, 8);
    } else {
        riscv_clear_csr(mstatus, 8);
    }
    __compiler_memory_barrier();
#else
    pico_default_asm_volatile ("msr PRIMASK,%0"::"r" (status) : "memory" );
#endif
}
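
// Example usage (an illustrative sketch): protecting a short, non-blocking critical section that
// is shared with an IRQ handler running on the same core. The variable name counter is hypothetical.
//
//     static uint32_t counter;
//
//     uint32_t status = save_and_disable_interrupts();
//     counter++;                   // brief critical section, no blocking calls
//     restore_interrupts(status);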

/*! \brief Restore interrupts to a specified state with restricted transitions
 * \ingroup hardware_sync
 *
 * This method should only be used when the interrupt state is known to be disabled,
 * e.g. when paired with \ref save_and_disable_interrupts()
 *
 * \param status Previous interrupt status from save_and_disable_interrupts()
 */
__force_inline static void restore_interrupts_from_disabled(uint32_t status) {
#ifdef __riscv
    // on RISC-V this can enable interrupts, but not disable interrupts... which
    // is the common case and doesn't require a branch
    __compiler_memory_barrier();
    riscv_set_csr(mstatus, status & 8);
    __compiler_memory_barrier();
#else
    // on ARM, this behaves the same as restore_interrupts()
    pico_default_asm_volatile ("msr PRIMASK,%0"::"r" (status) : "memory" );
#endif
}

#include "hardware/sync/spin_lock.h"

/*! \brief Return a spin lock number from the _striped_ range
 * \ingroup hardware_sync
 *
 * Returns a spin lock number in the range PICO_SPINLOCK_ID_STRIPED_FIRST to PICO_SPINLOCK_ID_STRIPED_LAST
 * in a round robin fashion. This does not grant the caller exclusive access to the spin lock, so the caller
 * must:
 *
 * -# Abide (with other callers) by the contract of only holding this spin lock briefly (and with IRQs disabled - the default via \ref spin_lock_blocking()),
 *    and not whilst holding other spin locks.
 * -# Be OK with any (necessarily brief, due to the above requirement) contention with other possible users of the spin lock.
 *
 * \return lock_num a spin lock number the caller may use (non-exclusively)
 * \see PICO_SPINLOCK_ID_STRIPED_FIRST
 * \see PICO_SPINLOCK_ID_STRIPED_LAST
 */
uint next_striped_spin_lock_num(void);
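
// Example usage (an illustrative sketch): a higher level primitive picking a shared spin lock
// from the striped range at initialisation time, rather than claiming one exclusively.
// The struct and function names are hypothetical.
//
//     typedef struct {
//         spin_lock_t *lock;
//         uint32_t count;
//     } my_counter_t;
//
//     void my_counter_init(my_counter_t *c) {
//         c->lock = spin_lock_instance(next_striped_spin_lock_num());
//         c->count = 0;
//     }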

/*! \brief Mark a spin lock as used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware. Will cause a panic if the spin lock
 * is already claimed. Use of this method by libraries detects accidental
 * configurations that would fail in unpredictable ways.
 *
 * \param lock_num the spin lock number
 */
void spin_lock_claim(uint lock_num);

/*! \brief Mark multiple spin locks as used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware. Will cause a panic if any of the spin locks
 * are already claimed. Use of this method by libraries detects accidental
 * configurations that would fail in unpredictable ways.
 *
 * \param lock_num_mask Bitfield of all required spin locks to claim (bit 0 == spin lock 0, bit 1 == spin lock 1 etc)
 */
void spin_lock_claim_mask(uint32_t lock_num_mask);

/*! \brief Mark a spin lock as no longer used
 * \ingroup hardware_sync
 *
 * Method for cooperative claiming of hardware.
 *
 * \param lock_num the spin lock number to release
 */
void spin_lock_unclaim(uint lock_num);

/*! \brief Claim a free spin lock
 * \ingroup hardware_sync
 *
 * \param required if true the function will panic if none are available
 * \return the spin lock number or -1 if required was false, and none were free
 */
int spin_lock_claim_unused(bool required);

/*! \brief Determine if a spin lock is claimed
 * \ingroup hardware_sync
 *
 * \param lock_num the spin lock number
 * \return true if claimed, false otherwise
 * \see spin_lock_claim
 * \see spin_lock_claim_mask
 */
bool spin_lock_is_claimed(uint lock_num);
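
// Example usage (an illustrative sketch): an operating system layer cooperatively claiming one of
// the spin locks reserved for OS use at startup, and releasing it again at shutdown.
//
//     spin_lock_claim(PICO_SPINLOCK_ID_OS1);                          // panics if already claimed
//     spin_lock_t *os_lock = spin_lock_instance(PICO_SPINLOCK_ID_OS1);
//     // ... use os_lock ...
//     spin_lock_unclaim(PICO_SPINLOCK_ID_OS1);                        // make it available again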

// no longer use __mem_fence_acquire here, as it is overkill on cortex M0+
#if PICO_C_COMPILER_IS_GNU
#define remove_volatile_cast(t, x) (t)(x)
#define remove_volatile_cast_no_barrier(t, x) (t)(x)
#else
#define remove_volatile_cast(t, x) ({__compiler_memory_barrier(); Clang_Pragma("clang diagnostic push"); Clang_Pragma("clang diagnostic ignored \"-Wcast-qual\""); (t)(x); Clang_Pragma("clang diagnostic pop"); })
#define remove_volatile_cast_no_barrier(t, x) ({ Clang_Pragma("clang diagnostic push"); Clang_Pragma("clang diagnostic ignored \"-Wcast-qual\""); (t)(x); Clang_Pragma("clang diagnostic pop"); })
#endif

#ifdef __cplusplus
}
#endif

#endif