/* SPDX-License-Identifier: Apache-2.0 */
/*
 * Ticket spinlocks (CONFIG_TICKET_SPINLOCKS) queue waiting CPUs in FIFO
 * order using two atomic counters, "tail" and "owner".  Taking the lock:
 *
 * - the current FIFO tail value is atomically incremented while its
 *   previous value is used as the caller's ticket
 * - we spin until the FIFO head ("owner") becomes equal to the ticket value
 */
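/*
 * Illustrative sketch only (not from spinlock.h): a minimal ticket lock
 * built on C11 atomics, showing the tail/owner handshake described in
 * the comment above.  The names ticket_lock_t, ticket_lock() and
 * ticket_unlock() are hypothetical.
 */
#include <stdatomic.h>

typedef struct {
	atomic_uint tail;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *tl)
{
	/* Take a ticket: fetch-and-increment returns the previous tail */
	unsigned int ticket = atomic_fetch_add(&tl->tail, 1);

	/* Spin until the owner counter reaches our ticket */
	while (atomic_load(&tl->owner) != ticket) {
	}
}

static void ticket_unlock(ticket_lock_t *tl)
{
	/* Serve the next waiter in FIFO order */
	atomic_fetch_add(&tl->owner, 1);
}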
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
static ALWAYS_INLINE void z_spinlock_validate_pre(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Invalid spinlock %p", l);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
#endif
#endif
}
static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	l->lock_time = sys_clock_cycle_get_32();
#endif
#endif
}
/**
 * @brief Lock a spinlock
 *
 * If the lock is not immediately available, the caller enters an
 * implementation-defined busy loop ("spinning") until the lock is acquired.
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	k_spinlock_key_t k;

	ARG_UNUSED(l);

	/* Note that we need to use the underlying arch-specific lock
	 * implementation.  The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/* Enqueue ourselves at the FIFO tail, receiving a ticket */
	atomic_val_t ticket = atomic_inc(&l->tail);

	/* Spin until our ticket is served */
	while (atomic_get(&l->owner) != ticket) {
		arch_spin_relax();
	}
#else
	while (!atomic_cas(&l->locked, 0, 1)) {
		arch_spin_relax();
	}
#endif
#endif
	z_spinlock_validate_post(l);

	return k;
}
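/*
 * Illustrative usage sketch (not from spinlock.h): the canonical
 * lock/key pattern around a critical section.  The names my_lock,
 * my_counter and my_counter_increment() are hypothetical.
 */
struct k_spinlock my_lock;
int my_counter;

void my_counter_increment(void)
{
	k_spinlock_key_t key = k_spin_lock(&my_lock);

	my_counter++;			/* critical section */
	k_spin_unlock(&my_lock, key);
}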
/**
 * @brief Attempt to lock a spinlock
 *
 * This routine makes one attempt to lock @p l. If it is successful, then
 * it will store the key into @p k.
 *
 * @param[in] l A pointer to the spinlock to lock
 * @param[out] k A pointer to the spinlock key
 * @retval 0 on success
 * @retval -EBUSY if another thread holds the lock
 */
static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k)
{
	int key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/*
	 * The atomic_get() and atomic_cas() below are two separate
	 * operations, so in theory a wrapped ticket counter could let us
	 * lock an already-locked spinlock.  After we read the owner value,
	 * either:
	 * - spinlock needs to be taken 0xffff_..._ffff + 1 times, or
	 * - spinlock needs to be taken and released 0xffff_..._ffff times and
	 *   then taken again.
	 * In real-life systems this is considered non-reproducible given that
	 * those actions take (at least) several dozen CPU cycles each.
	 */
	atomic_val_t ticket_val = atomic_get(&l->owner);

	if (!atomic_cas(&l->tail, ticket_val, ticket_val + 1)) {
		goto busy;
	}
#else
	if (!atomic_cas(&l->locked, 0, 1)) {
		goto busy;
	}
#endif
#endif
	z_spinlock_validate_post(l);

	k->key = key;

	return 0;

#ifdef CONFIG_SMP
busy:
	arch_irq_unlock(key);
	return -EBUSY;
#endif
}
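/*
 * Illustrative usage sketch (not from spinlock.h): taking the lock
 * opportunistically instead of spinning, reusing the hypothetical
 * my_lock/my_counter globals from the sketch above.
 */
void my_counter_try_increment(void)
{
	k_spinlock_key_t key;

	if (k_spin_trylock(&my_lock, &key) == 0) {
		my_counter++;
		k_spin_unlock(&my_lock, key);
	} /* else: -EBUSY, another CPU holds the lock; do other work */
}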
/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock().  Spin locks and their
 * keys are scoped dynamically and must be released in reverse order of
 * acquisition: it is not legal to unlock mis-nested locks, or to unlock
 * locks that are not held, or to unlock with a key returned from a
 * different acquisition.
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

	__ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
		 "Spin lock %p held %u cycles, longer than limit of %u cycles",
		 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif
#endif

#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/* Give the spinlock to the next CPU in the FIFO */
	(void)atomic_inc(&l->owner);
#else
	(void)atomic_clear(&l->locked);
#endif
#endif
	arch_irq_unlock(key.key);
}
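/*
 * Illustrative prj.conf sketch (not from spinlock.h): enabling the
 * validation layer and arming the hold-time assert above.  The limit
 * is measured with sys_clock_cycle_get_32(), i.e. in system timer
 * cycles (0 disables the check); 10000 is an arbitrary example value.
 *
 *   CONFIG_SPIN_VALIDATE=y
 *   CONFIG_SPIN_LOCK_TIME_LIMIT=10000
 */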
/**
 * @param l A pointer to the spinlock
 * @retval true if the spinlock is held by some CPU, false otherwise
 */
static ALWAYS_INLINE bool z_spin_is_locked(struct k_spinlock *l)
{
#ifdef CONFIG_TICKET_SPINLOCKS
	atomic_val_t ticket_val = atomic_get(&l->owner);

	/* Locked iff tail has moved past owner: probe with a no-op CAS */
	return !atomic_cas(&l->tail, ticket_val, ticket_val);
#else
	return l->locked;
#endif
}
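/*
 * Illustrative sketch (not from spinlock.h): z_spin_is_locked() is an
 * internal helper, useful mainly for asserting that a caller already
 * holds the lock.  my_locked_helper() is a hypothetical name.
 */
static void my_locked_helper(struct k_spinlock *l)
{
	__ASSERT(z_spin_is_locked(l), "helper requires %p to be held", l);
	/* ... work that is only safe while the lock is held ... */
}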
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	(void)atomic_inc(&l->owner);
#else
	(void)atomic_clear(&l->locked);
#endif
#endif
}
static ALWAYS_INLINE void z_spin_onexit(__maybe_unused k_spinlock_key_t *k)
{
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(k->key, "K_SPINLOCK exited with goto, break or return, "
			 "use K_SPINLOCK_BREAK instead.");
#endif
}
 * Behind the scenes this pattern expands to a for-loop whose body is
 * executed exactly once.
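/*
 * Illustrative usage sketch (not from spinlock.h): the K_SPINLOCK()
 * scoped form with an early exit via K_SPINLOCK_BREAK, reusing the
 * hypothetical my_lock/my_counter globals from above.
 */
void my_counter_reset(void)
{
	K_SPINLOCK(&my_lock) {
		if (my_counter == 0) {
			K_SPINLOCK_BREAK; /* early exit from the section */
		}
		my_counter = 0;
	} /* the lock is released on every exit path of the block */
}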