/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _PICO_LOCK_CORE_H
#define _PICO_LOCK_CORE_H

#include "pico.h"
#include "pico/time.h"
#include "hardware/sync.h"

/** \file lock_core.h
 *  \defgroup lock_core lock_core
 *  \ingroup pico_sync
 * \brief Base synchronization/lock primitive support.
 *
 * Most of the pico_sync locking primitives contain a lock_core_t structure member. This currently just holds a spin
 * lock, which is used only to protect the contents of the rest of the structure as part of implementing the synchronization
 * primitive. As such, the spin_lock member of lock_core is never held on return from any function for the primitive.
 *
 * \ref critical_section is an exceptional case in that it does not have a lock_core_t; it simply wraps a spin lock, providing
 * methods to lock and unlock said spin lock.
 *
 * lock_core based structures work by locking the spin lock, checking state, and then deciding whether they additionally need to block
 * or notify when the spin lock is released. In the blocking case, they will wake up again in the future, and try the process again.
 *
 * By default the SDK just uses the processors' events via SEV and WFE for notification and blocking, as these are sufficient for
 * cross-core use and for notification from interrupt handlers. However, macros are defined in this file that abstract the wait
 * and notify mechanisms to allow the SDK locking functions to be used effectively within an RTOS or other environment.
 *
 * When implementing an RTOS, it is desirable for the SDK synchronization primitives that wait to block the calling task (and immediately yield),
 * and for those that notify to wake a blocked task which isn't on processor. At least the wait macro implementation needs to be atomic with the
 * unlock of the protecting spin_lock from the caller's point of view; i.e. the task should begin its wait at the point it unlocks the spin lock. Such an implementation is
 * up to the RTOS integration; however, the macros are defined such that these operations are always combined into a single call
 * (so they can be performed atomically), even though the default implementation does not need this, as a WFE which starts
 * after the corresponding SEV is not missed.
 */
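
/*
 * As an illustrative sketch (not part of this header), a blocking acquire in a
 * lock_core based primitive typically follows the lock/check/wait pattern
 * described above; `my_primitive_t` (with an embedded lock_core_t member `core`)
 * and its `available` flag are hypothetical:
 *
 *     static void my_primitive_acquire(my_primitive_t *p) {
 *         do {
 *             // take the protecting spin lock (saves and disables interrupt state)
 *             uint32_t save = spin_lock_blocking(p->core.spin_lock);
 *             if (p->available) {
 *                 // state allows the caller to proceed; consume it and release the spin lock
 *                 p->available = false;
 *                 spin_unlock(p->core.spin_lock, save);
 *                 return;
 *             }
 *             // otherwise atomically release the spin lock and wait for a notification,
 *             // then loop and re-check the state
 *             lock_internal_spin_unlock_with_wait(&p->core, save);
 *         } while (true);
 *     }
 *
 * A releasing function would update the state under the same spin lock and use
 * lock_internal_spin_unlock_with_notify to wake any waiter.
 */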

// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_LOCK_CORE, Enable/disable assertions in the lock core, type=bool, default=0, group=pico_sync
#ifndef PARAM_ASSERTIONS_ENABLED_LOCK_CORE
#define PARAM_ASSERTIONS_ENABLED_LOCK_CORE 0
#endif

/** \file lock_core.h
 *  \ingroup lock_core
 *
 * Base implementation for locking primitives protected by a spin lock. The spin lock is only used to protect
 * access to the remaining lock state (in primitives using lock_core); it is never left locked outside
 * of the function implementations.
 */
struct lock_core {
    // spin lock protecting this lock's state
    spin_lock_t *spin_lock;

    // note any lock members in containing structures need not be volatile;
    // they are protected by memory/compiler barriers when acquiring and releasing spin locks
};

typedef struct lock_core lock_core_t;

/*! \brief  Initialise a lock structure
 *  \ingroup lock_core
 *
 * Initialize a lock structure, providing the spin lock number to use for protecting internal state.
 *
 * \param core Pointer to the lock_core to initialize
 * \param lock_num Spin lock number to use for the lock. As the spin lock is only used internally by the locking primitive
 *                 method implementations, this does not need to be globally unique; however, a heavily shared spin lock could suffer contention
 */
void lock_init(lock_core_t *core, uint lock_num);
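
/*
 * For example (an illustrative sketch; `my_primitive_t` with an embedded
 * lock_core_t member `core` is hypothetical):
 *
 *     my_primitive_t prim;
 *     // claim any otherwise-unused spin lock number (panics if none is free)
 *     lock_init(&prim.core, (uint) spin_lock_claim_unused(true));
 */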

#ifndef lock_owner_id_t
/*! \brief  type to use to store the 'owner' of a lock.
 *  \ingroup lock_core
 *
 * By default this is int8_t as it only needs to store the core number or -1; however, it may be
 * overridden if a larger type is required (e.g. for an RTOS task id)
 */
#define lock_owner_id_t int8_t
#endif

#ifndef LOCK_INVALID_OWNER_ID
/*! \brief  marker value to use for a lock_owner_id_t which does not refer to any valid owner
 *  \ingroup lock_core
 */
#define LOCK_INVALID_OWNER_ID ((lock_owner_id_t)-1)
#endif

#ifndef lock_get_caller_owner_id
/*! \brief  return the owner id for the caller
 *  \ingroup lock_core
 *
 * By default this returns the calling core number, but may be overridden (e.g. to return an RTOS task id)
 */
#define lock_get_caller_owner_id() ((lock_owner_id_t)get_core_num())
#ifndef lock_is_owner_id_valid
#define lock_is_owner_id_valid(id) ((id)>=0)
#endif
#endif

#ifndef lock_is_owner_id_valid
#define lock_is_owner_id_valid(id) ((id) != LOCK_INVALID_OWNER_ID)
#endif
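
/*
 * An RTOS integration might override these defaults before this header is first
 * included, along these lines (an illustrative sketch; `my_rtos_current_task_id`
 * is a hypothetical function returning a non-zero task identifier):
 *
 *     #define lock_owner_id_t            uint32_t
 *     #define LOCK_INVALID_OWNER_ID      ((lock_owner_id_t)0)
 *     #define lock_get_caller_owner_id() ((lock_owner_id_t)my_rtos_current_task_id())
 *     #define lock_is_owner_id_valid(id) ((id) != LOCK_INVALID_OWNER_ID)
 */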

#ifndef lock_internal_spin_unlock_with_wait
/*! \brief   Atomically unlock the lock's spin lock, and wait for a notification.
 *  \ingroup lock_core
 *
 * _Atomic_ here refers to the fact that it should not be possible for a concurrent lock_internal_spin_unlock_with_notify
 * to insert itself between the spin unlock and this wait in a way that the wait does not see the notification (i.e. causing
 * a missed notification). In other words this method should always wake up in response to a lock_internal_spin_unlock_with_notify
 * for the same lock, which completes after this call starts.
 *
 * In an ideal implementation, this method would return exactly after the corresponding lock_internal_spin_unlock_with_notify
 * has subsequently been called on the same lock instance; however, this method is free to return at _any_ point before that;
 * this macro is _always_ used in a loop which locks the spin lock, checks the internal locking primitive state and then
 * waits again if the calling thread should not proceed.
 *
 * By default this macro simply unlocks the spin lock, and then performs a WFE, but may be overridden
 * (e.g. to actually block the RTOS task).
 *
 * \param lock the lock_core for the primitive which needs to block
 * \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked (i.e. the `PRIMASK`
 *             state when the spin lock was acquired)
 */
#define lock_internal_spin_unlock_with_wait(lock, save) spin_unlock((lock)->spin_lock, save), __wfe()
#endif

#ifndef lock_internal_spin_unlock_with_notify
/*! \brief   Atomically unlock the lock's spin lock, and send a notification
 *  \ingroup lock_core
 *
 * _Atomic_ here refers to the fact that it should not be possible for this notification to happen during a
 * lock_internal_spin_unlock_with_wait in a way that the wait does not see the notification (i.e. causing
 * a missed notification). In other words this method should always wake up any lock_internal_spin_unlock_with_wait
 * which started before this call completes.
 *
 * In an ideal implementation, this method would wake up only the corresponding lock_internal_spin_unlock_with_wait
 * that has been called on the same lock instance; however, it is free to wake up any of them, as they will check
 * their condition and then re-wait if necessary.
 *
 * By default this macro simply unlocks the spin lock, and then performs a SEV, but may be overridden
 * (e.g. to actually un-block RTOS task(s)).
 *
 * \param lock the lock_core for the primitive which needs to notify
 * \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked (i.e. the PRIMASK
 *             state when the spin lock was acquired)
 */
#define lock_internal_spin_unlock_with_notify(lock, save) spin_unlock((lock)->spin_lock, save), __sev()
#endif
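
/*
 * An RTOS might override the wait/notify pair along these lines (an illustrative
 * sketch; `my_rtos_block_on` and `my_rtos_wake_waiters` are hypothetical functions
 * which must unlock the spin lock themselves, and make the unlock-and-block step
 * appear atomic to notifiers as described above):
 *
 *     #define lock_internal_spin_unlock_with_wait(lock, save) \
 *         my_rtos_block_on((lock), (save))
 *     #define lock_internal_spin_unlock_with_notify(lock, save) \
 *         my_rtos_wake_waiters((lock), (save))
 */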

#ifndef lock_internal_spin_unlock_with_best_effort_wait_or_timeout
/*! \brief   Atomically unlock the lock's spin lock, and wait for a notification or a timeout
 *  \ingroup lock_core
 *
 * _Atomic_ here refers to the fact that it should not be possible for a concurrent lock_internal_spin_unlock_with_notify
 * to insert itself between the spin unlock and this wait in a way that the wait does not see the notification (i.e. causing
 * a missed notification). In other words this method should always wake up in response to a lock_internal_spin_unlock_with_notify
 * for the same lock, which completes after this call starts.
 *
 * In an ideal implementation, this method would return exactly after the corresponding lock_internal_spin_unlock_with_notify
 * has subsequently been called on the same lock instance or the timeout has been reached; however, this method is free to return
 * at _any_ point before that; this macro is _always_ used in a loop which locks the spin lock, checks the internal locking
 * primitive state and then waits again if the calling thread should not proceed.
 *
 * By default this simply unlocks the spin lock, and then calls \ref best_effort_wfe_or_timeout
 * but may be overridden (e.g. to actually block the RTOS task with a timeout).
 *
 * \param lock the lock_core for the primitive which needs to block
 * \param save the uint32_t value that should be passed to spin_unlock when the spin lock is unlocked (i.e. the PRIMASK
 *             state when the spin lock was acquired)
 * \param until the \ref absolute_time_t value
 * \return true if the timeout has been reached
 */
#define lock_internal_spin_unlock_with_best_effort_wait_or_timeout(lock, save, until) ({ \
    spin_unlock((lock)->spin_lock, save);                                                \
    best_effort_wfe_or_timeout(until);                                                   \
})
#endif
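
/*
 * As an illustrative sketch, a timed acquire loops in the same way as the untimed
 * case, but gives up once the macro reports that the timeout was reached
 * (`my_primitive_t` and its `available` flag are hypothetical, as above):
 *
 *     static bool my_primitive_acquire_until(my_primitive_t *p, absolute_time_t until) {
 *         do {
 *             uint32_t save = spin_lock_blocking(p->core.spin_lock);
 *             if (p->available) {
 *                 p->available = false;
 *                 spin_unlock(p->core.spin_lock, save);
 *                 return true; // acquired before the deadline
 *             }
 *             // release the spin lock and wait; the result is true once `until` has passed
 *             if (lock_internal_spin_unlock_with_best_effort_wait_or_timeout(&p->core, save, until)) {
 *                 return false; // timed out without acquiring
 *             }
 *         } while (true);
 *     }
 */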

#ifndef sync_internal_yield_until_before
/*! \brief   yield to other processing until some time before the requested time
 *  \ingroup lock_core
 *
 * This method is provided for cases where the caller has no useful work to do
 * until the specified time.
 *
 * By default this method does nothing; however, it can be overridden (for example by an
 * RTOS which is able to block the current task until the scheduler tick before
 * the given time)
 *
 * \param until the \ref absolute_time_t value
 */
#define sync_internal_yield_until_before(until) ((void)0)
#endif
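
/*
 * An RTOS might override this to sleep the calling task instead, e.g. (an
 * illustrative sketch; `my_rtos_sleep_until_near` is a hypothetical function
 * which blocks the current task until shortly before the given time):
 *
 *     #define sync_internal_yield_until_before(until) my_rtos_sleep_until_near(until)
 */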

#endif