/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Public interface for spinlocks
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <zephyr/sys/atomic.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>
#include <stdbool.h>
#include <zephyr/arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Spinlock APIs
 * @defgroup spinlock_apis Spinlock APIs
 * @ingroup kernel_apis
 * @{
 */

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock(). Any number of spinlocks may be defined in
 * application code.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
	/* Stores the time (in cycles) when a lock was taken
	 */
	uint32_t lock_time;
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#if defined(CONFIG_CPP) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined,
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas about the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this, we add a 1-byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};
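
/* A minimal usage sketch, not part of the API above: spinlock
 * instances are plain structs ("any number of spinlocks may be
 * defined in application code"), and a zero-filled struct starts in
 * the unlocked state, since the SMP lock path takes the lock by
 * transitioning 'locked' from 0 to 1. The names below (my_data_lock,
 * my_shared_counter) are illustrative assumptions only.
 *
 *   static struct k_spinlock my_data_lock;
 *   static int my_shared_counter;
 *
 * No separate init/destroy call is provided or required by this
 * header before passing &my_data_lock to k_spin_lock().
 */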
78
/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock(). It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time. Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock(). The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time. Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested. It is legal to lock an
 * (unlocked) spin lock while holding a different lock. Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place. Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
	while (!atomic_cas(&l->locked, 0, 1)) {
		arch_spin_relax();
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	l->lock_time = sys_clock_cycle_get_32();
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */
	return k;
}
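
/* A minimal usage sketch of the lock/unlock pairing described above.
 * The lock variable and the data it protects (my_lock, my_count) are
 * illustrative assumptions, not part of this header.
 *
 *   static struct k_spinlock my_lock;
 *   static int my_count;
 *
 *   void my_increment(void)
 *   {
 *           k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *           my_count++;   // protected from other CPUs and from local
 *                         // suspension/interruption
 *           k_spin_unlock(&my_lock, key);
 *   }
 *
 * The key returned by k_spin_lock() must be passed, unmodified, to the
 * matching k_spin_unlock() on the same lock.
 */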
168
/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock(). After this
 * function is called, any CPU will be able to acquire the lock. If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested. A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned. Attempts to
 * unlock mis-nested locks, or to unlock locks that are not held, or
 * to pass a key parameter other than the one returned from
 * k_spin_lock(), are illegal. When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

	__ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
		 "Spin lock %p held %u cycles, longer than limit of %u cycles",
		 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}
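
/* A sketch of the "properly nested" rule from the k_spin_unlock()
 * documentation: locks are released in the reverse order of
 * acquisition, each with its own key. The lock names (lock_a, lock_b)
 * are illustrative only.
 *
 *   static struct k_spinlock lock_a;
 *   static struct k_spinlock lock_b;
 *
 *   void my_nested_section(void)
 *   {
 *           k_spinlock_key_t key_a = k_spin_lock(&lock_a);
 *           k_spinlock_key_t key_b = k_spin_lock(&lock_b);
 *
 *           // ...work requiring both locks...
 *
 *           k_spin_unlock(&lock_b, key_b);   // innermost lock first
 *           k_spin_unlock(&lock_a, key_a);
 *   }
 *
 * Attempting to re-lock lock_a inside this function would deadlock,
 * since spin locks are not recursive.
 */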
218
/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}
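
/* A hedged sketch of how an internal caller might use k_spin_release():
 * the lock is dropped so other CPUs can acquire it, while this CPU
 * keeps interrupts masked and restores them itself from the saved key
 * (internal code may touch key.key, as k_spin_unlock() itself does).
 * The helper name finish_work_irqs_masked() is an illustrative
 * assumption, not a real kernel routine.
 *
 *   k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *   // ...update state protected by my_lock...
 *
 *   k_spin_release(&my_lock);        // other CPUs may now take the lock
 *   finish_work_irqs_masked();       // still protected from local interrupts
 *   arch_irq_unlock(key.key);        // finally restore interrupt state
 */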
232
/** @} */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */