Lock-related excerpts from the powerpc simple spinlock implementation (arch/powerpc/include/asm/spinlock.h). Lines elided from this listing are marked /* ... */.
/*
 * Simple spin lock operations.
 */
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.slock == 0;
}
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        return !arch_spin_value_unlocked(*lock);
}
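
For context, a minimal sketch of the lock word these helpers inspect, assuming the usual one-word layout (the real definition lives in the spinlock_types header; only the field name slock is taken from the code above):

/* Assumed layout: a single word, 0 = unlocked, nonzero = held. */
typedef struct {
        volatile signed int slock;
} arch_spinlock_t;

Note that arch_spin_value_unlocked() takes the lock by value, so it can test a snapshot of the word without touching the live lock; arch_spin_is_locked() dereferences the live lock once and reuses the same test.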
/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
        /* ... lwarx/stwcx. retry loop elided in this listing; only its
         * input operands survived: */
                : "r" (token), "r" (&lock->slock)
        /* ... */
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __arch_spin_trylock(lock) == 0;
}
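
A hedged usage sketch (the caller is invented for illustration): because __arch_spin_trylock() returns the old lock value, the == 0 comparison turns "was free, now ours" into a boolean.

/* Illustrative caller, not from the file. */
static int try_do_work(arch_spinlock_t *lock)
{
        if (!arch_spin_trylock(lock))
                return 0;       /* lock was held; back off */
        /* ... critical section ... */
        arch_spin_unlock(lock);
        return 1;
}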
/*
 * On a system with shared processors (where a physical processor
 * is multiplexed between several virtual processors), there is no
 * point spinning on a lock if the holder of the lock isn't
 * currently scheduled on a physical processor. Instead we detect
 * this situation and ask the hypervisor to give the rest of our
 * timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.
 */
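
The holder encoding described above can be sketched as follows; the macro and helper names are assumptions, only the 0x80000000 | smp_processor_id() encoding comes from the comment:

/* Top bit marks "held"; low bits carry the holder's virtual CPU id. */
#define LOCK_HELD_BIT   0x80000000u

static inline unsigned int make_lock_token(unsigned int cpu)
{
        return LOCK_HELD_BIT | cpu;
}

static inline unsigned int lock_holder_cpu(unsigned int slock)
{
        return slock & ~LOCK_HELD_BIT;  /* meaningful only while held */
}

A hypervisor yield routine can then read the lock word, recover the holding virtual processor, and direct the remaining timeslice there.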
#ifdef CONFIG_PPC_SPLPAR
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif
static inline void spin_yield(arch_spinlock_t *lock)
{
        /* ... */
                splpar_spin_yield(lock);
        /* ... */
}

static inline void rw_yield(arch_rwlock_t *lock)
{
        /* ... */
                splpar_rw_yield(lock);
        /* ... */
}
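
A plausible full shape for these wrappers, assuming the yield is gated on running under a hypervisor in shared-processor mode; the guard is stubbed out here so the sketch stands alone, and its name is an assumption:

/* Stub: a real system would query the hypervisor/firmware. */
static inline int is_shared_processor_sketch(void)
{
        return 0;
}

static inline void spin_yield_sketch(arch_spinlock_t *lock)
{
        if (is_shared_processor_sketch())
                splpar_spin_yield(lock);        /* donate timeslice to holder */
        else
                __asm__ __volatile__("" ::: "memory");  /* compiler barrier */
}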
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                do {
                        /* ... lower thread priority, maybe yield ... */
                        splpar_spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                /* ... restore thread priority ... */
        }
}
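
The control flow recoverable from these lines is the classic test-and-test-and-set loop: one atomic attempt, then spin on cheap plain loads until the lock looks free, and only then retry the atomic operation. A portable C11 sketch of the same shape (illustrative, not the kernel's lwarx/stwcx. asm):

#include <stdatomic.h>

static void spin_lock_sketch(atomic_int *slock)
{
        for (;;) {
                int expected = 0;
                /* fast path: one atomic attempt */
                if (atomic_compare_exchange_weak_explicit(slock, &expected, 1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                        return;
                /* slow path: wait on plain loads; a SPLPAR kernel would
                 * consider yielding to the hypervisor inside this loop */
                while (atomic_load_explicit(slock, memory_order_relaxed) != 0)
                        ;
        }
}

Spinning on relaxed loads keeps the cache line shared while waiting; the store-conditional (or CAS here) only runs when there is a real chance of success.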
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                /* ... re-enable interrupts per the caller's flags ... */
                do {
                        splpar_spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                /* ... disable interrupts again before retrying ... */
        }
}
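
The _flags variant differs only in what happens while waiting: it restores the caller's pre-lock interrupt state so pending interrupts can be serviced during the spin, then masks them again before retrying. A self-contained sketch with the irq helpers stubbed out (names mirror the kernel's local_irq_*() helpers, but these are stand-ins):

typedef unsigned long irqflags_t;

static irqflags_t local_save_flags_stub(void) { return 0; }
static void local_irq_restore_stub(irqflags_t f) { (void)f; }

static void spin_lock_flags_sketch(volatile int *slock, irqflags_t flags)
{
        for (;;) {
                if (__sync_bool_compare_and_swap(slock, 0, 1))
                        return;                         /* got the lock */
                irqflags_t flags_dis = local_save_flags_stub();
                local_irq_restore_stub(flags);          /* let interrupts in */
                while (*slock != 0)
                        ;                               /* spin on plain loads */
                local_irq_restore_stub(flags_dis);      /* mask them again */
        }
}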
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        /* ... release barrier elided ... */
        lock->slock = 0;
}
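
The plain store is safe only because of the elided release barrier before it: every store in the critical section must be visible before other CPUs can observe slock == 0. In portable C11 the pair collapses into one release store:

#include <stdatomic.h>

static void spin_unlock_sketch(atomic_int *slock)
{
        /* release: orders all prior critical-section accesses
         * before the lock word becomes visible as 0 */
        atomic_store_explicit(slock, 0, memory_order_release);
}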
/*
 * Read-write spinlocks. Any writer that can run in interrupt
 * context needs an irq-safe write-lock, but readers can get
 * non-irqsafe read-locks.
 */
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
        /* ... lwarx/stwcx. increment loop elided; its input operand: */
                : "r" (&rw->lock)
        /* ... */
}
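
Per the comment, the rwlock word counts readers (positive) and marks a writer with a negative value, and the function reports old value + 1 so that "> 0" means success. A C11 sketch of exactly that contract (semantics only; the kernel uses an asm loop):

#include <stdatomic.h>

/* Returns old value + 1; > 0 means the read lock was taken. */
static long read_trylock_sketch(atomic_long *lock)
{
        long old = atomic_load_explicit(lock, memory_order_relaxed);

        do {
                if (old < 0)            /* a writer holds the lock */
                        return old + 1; /* <= 0: failure */
        } while (!atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
        return old + 1;                 /* > 0: success */
}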
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
        /* ... compare-and-swap of the writer token elided; its inputs: */
                : "r" (token), "r" (&rw->lock)
        /* ... */
}
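
Write acquisition follows the same convention as the plain spinlock: install a negative writer token only if the word is exactly 0, and return the old value so 0 means success. A sketch, with the token value an assumption:

#include <stdatomic.h>

#define WRITE_TOKEN     (-1L)   /* assumed stand-in for the holder token */

/* Returns the old lock value; 0 means the write lock was taken. */
static long write_trylock_sketch(atomic_long *lock)
{
        long expected = 0;

        if (atomic_compare_exchange_strong_explicit(lock, &expected,
                                                    WRITE_TOKEN,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
                return 0;
        return expected;        /* nonzero: somebody holds it */
}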
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_read_trylock(rw) > 0))
                        break;
                do {
                        /* ... */
                } while (unlikely(rw->lock < 0));
        }
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_write_trylock(rw) == 0))
                        break;
                do {
                        /* ... */
                } while (unlikely(rw->lock != 0));
        }
}
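
The two spin conditions encode the asymmetry: a waiting reader only has to out-wait a writer (word < 0), while a waiting writer must see the word drain fully to 0 (no readers and no writer). Reusing the trylock sketches above:

static void read_lock_sketch(atomic_long *lock)
{
        for (;;) {
                if (read_trylock_sketch(lock) > 0)
                        return;
                while (atomic_load_explicit(lock, memory_order_relaxed) < 0)
                        ;       /* writer active */
        }
}

static void write_lock_sketch(atomic_long *lock)
{
        for (;;) {
                if (write_trylock_sketch(lock) == 0)
                        return;
                while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
                        ;       /* readers or a writer active */
        }
}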
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        /* ... atomic decrement of the reader count elided; its input: */
                : "r" (&rw->lock)
        /* ... */
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        /* ... release barrier elided ... */
        rw->lock = 0;
}
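
Read unlock is an atomic decrement of the reader count; write unlock mirrors arch_spin_unlock() above, a release store of 0. The read side in C11:

#include <stdatomic.h>

static void read_unlock_sketch(atomic_long *lock)
{
        /* drop our reader reference; release ordering pairs with
         * the acquire in the lock paths */
        atomic_fetch_sub_explicit(lock, 1, memory_order_release);
}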
#define arch_spin_relax(lock)   spin_yield(lock)
#define arch_read_relax(lock)   rw_yield(lock)
#define arch_write_relax(lock)  rw_yield(lock)