Lines Matching refs:owner
41 atomic_long_set(&lock->owner, 0); in __mutex_init()
67 static inline struct task_struct *__owner_task(unsigned long owner) in __owner_task() argument
69 return (struct task_struct *)(owner & ~MUTEX_FLAGS); in __owner_task()
72 static inline unsigned long __owner_flags(unsigned long owner) in __owner_flags() argument
74 return owner & MUTEX_FLAGS; in __owner_flags()
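The __owner_task()/__owner_flags() pair above exists because lock->owner packs two things into one word: the owning task_struct pointer plus up to three flag bits stored in the pointer's low (alignment) bits. A minimal userspace sketch of that encoding, with the flag values as defined in mutex.c and struct task standing in for struct task_struct:

#define MUTEX_FLAG_WAITERS      0x01    /* waiters are queued on the lock */
#define MUTEX_FLAG_HANDOFF      0x02    /* a waiter asked for direct handoff */
#define MUTEX_FLAG_PICKUP       0x04    /* handoff done, waiting for pickup */
#define MUTEX_FLAGS             0x07    /* mask of all flag bits */

struct task;                            /* stand-in for struct task_struct */

/* Strip the flag bits to recover the owning task pointer. */
static inline struct task *owner_task(unsigned long owner)
{
        return (struct task *)(owner & ~MUTEX_FLAGS);
}

/* Keep only the flag bits carried in the low bits of the word. */
static inline unsigned long owner_flags(unsigned long owner)
{
        return owner & MUTEX_FLAGS;
}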
82 unsigned long owner, curr = (unsigned long)current; in __mutex_trylock_or_owner() local
84 owner = atomic_long_read(&lock->owner); in __mutex_trylock_or_owner()
86 unsigned long old, flags = __owner_flags(owner); in __mutex_trylock_or_owner()
87 unsigned long task = owner & ~MUTEX_FLAGS; in __mutex_trylock_or_owner()
110 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags); in __mutex_trylock_or_owner()
111 if (old == owner) in __mutex_trylock_or_owner()
114 owner = old; in __mutex_trylock_or_owner()
117 return __owner_task(owner); in __mutex_trylock_or_owner()
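The __mutex_trylock_or_owner() lines above are a compare-and-swap acquire loop: install the current task as owner while preserving the flag bits, or else report who owns the lock. A simplified userspace approximation with C11 atomics, reusing the helpers sketched above; it deliberately ignores the HANDOFF/PICKUP special cases the real function handles:

#include <stdatomic.h>
#include <stddef.h>

struct sketch_mutex {
        _Atomic unsigned long owner;    /* task pointer | flag bits */
};

/* Returns NULL on success, otherwise the task currently owning the lock. */
static struct task *trylock_or_owner(struct sketch_mutex *lock, struct task *curr)
{
        unsigned long owner = atomic_load_explicit(&lock->owner,
                                                   memory_order_relaxed);

        for (;;) {
                unsigned long flags = owner_flags(owner);
                unsigned long task  = owner & ~MUTEX_FLAGS;
                unsigned long want  = (unsigned long)curr | flags;

                if (task)               /* somebody owns it: report the owner */
                        return owner_task(owner);

                /* Claim the lock with acquire semantics, keeping the flag bits. */
                if (atomic_compare_exchange_weak_explicit(&lock->owner, &owner, want,
                                memory_order_acquire, memory_order_relaxed))
                        return NULL;
                /* The failed CAS reloaded 'owner'; loop and retry. */
        }
}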
144 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) in __mutex_trylock_fast()
154 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr) in __mutex_unlock_fast()
163 atomic_long_or(flag, &lock->owner); in __mutex_set_flag()
168 atomic_long_andnot(flag, &lock->owner); in __mutex_clear_flag()
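The fast-path and flag-helper lines above (144-168) rely on the same encoding: the lock is free exactly when the owner word is 0, so one compare-and-swap from 0 to the current task acquires it, one from the task back to 0 releases it, and the flag bits are toggled with atomic OR/AND-NOT without touching the pointer. Continuing the userspace sketch:

#include <stdbool.h>

/* Uncontended acquire: succeeds only if nobody owns the lock and no
 * flag bits are set, i.e. the whole word is 0. */
static bool trylock_fast(struct sketch_mutex *lock, struct task *curr)
{
        unsigned long zero = 0;

        return atomic_compare_exchange_strong_explicit(&lock->owner, &zero,
                        (unsigned long)curr,
                        memory_order_acquire, memory_order_relaxed);
}

/* Uncontended release: succeeds only if we own the lock and no flag
 * bits are set, i.e. nobody waits and no handoff was requested. */
static bool unlock_fast(struct sketch_mutex *lock, struct task *curr)
{
        unsigned long expected = (unsigned long)curr;

        return atomic_compare_exchange_strong_explicit(&lock->owner, &expected,
                        0UL,
                        memory_order_release, memory_order_relaxed);
}

/* Flag bits are set and cleared without touching the owner pointer. */
static void set_flag(struct sketch_mutex *lock, unsigned long flag)
{
        atomic_fetch_or(&lock->owner, flag);    /* like atomic_long_or() */
}

static void clear_flag(struct sketch_mutex *lock, unsigned long flag)
{
        atomic_fetch_and(&lock->owner, ~flag);  /* like atomic_long_andnot() */
}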
199 unsigned long owner = atomic_long_read(&lock->owner); in __mutex_handoff() local
205 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current); in __mutex_handoff()
206 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP); in __mutex_handoff()
209 new = (owner & MUTEX_FLAG_WAITERS); in __mutex_handoff()
214 old = atomic_long_cmpxchg_release(&lock->owner, owner, new); in __mutex_handoff()
215 if (old == owner) in __mutex_handoff()
218 owner = old; in __mutex_handoff()
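The __mutex_handoff() lines implement direct handoff: instead of dropping the lock and letting everyone race for it, the releasing owner writes the top waiter straight into the owner word, keeps the WAITERS bit, and sets PICKUP so the new owner knows the lock was given to it. A simplified sketch on the same userspace types; the debug assertions from lines 205-206 are omitted:

/* Hand the lock directly to 'top_waiter' (NULL just clears the owner). */
static void handoff(struct sketch_mutex *lock, struct task *top_waiter)
{
        unsigned long owner = atomic_load_explicit(&lock->owner,
                                                   memory_order_relaxed);

        for (;;) {
                /* Preserve only WAITERS; the HANDOFF request is consumed here. */
                unsigned long new = owner & MUTEX_FLAG_WAITERS;

                if (top_waiter)
                        new |= (unsigned long)top_waiter | MUTEX_FLAG_PICKUP;

                /* Release semantics publish the critical section to the new owner. */
                if (atomic_compare_exchange_weak_explicit(&lock->owner, &owner, new,
                                memory_order_release, memory_order_relaxed))
                        break;
                /* 'owner' was reloaded by the failed CAS; retry. */
        }
}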
366 struct task_struct *owner = __mutex_owner(lock); in __ww_mutex_wound() local
383 if (!owner) in __ww_mutex_wound()
395 if (owner != current) in __ww_mutex_wound()
396 wake_up_process(owner); in __ww_mutex_wound()
460 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS))) in ww_mutex_set_context_fastpath()
503 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) in ww_mutex_spin_on_owner()
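The WAITERS checks at lines 460 and 503 above both read the owner word once and test MUTEX_FLAG_WAITERS: the first to skip waking waiters when nobody is queued, the second to stop optimistic spinning once waiters exist. In the userspace sketch the test itself is just:

/* True if at least one task is queued on the lock's wait list. */
static bool has_waiters(struct sketch_mutex *lock)
{
        return atomic_load_explicit(&lock->owner, memory_order_relaxed)
                & MUTEX_FLAG_WAITERS;
}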
523 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, in mutex_spin_on_owner() argument
529 while (__mutex_owner(lock) == owner) { in mutex_spin_on_owner()
541 if (!owner->on_cpu || need_resched() || in mutex_spin_on_owner()
542 vcpu_is_preempted(task_cpu(owner))) { in mutex_spin_on_owner()
564 struct task_struct *owner; in mutex_can_spin_on_owner() local
571 owner = __mutex_owner(lock); in mutex_can_spin_on_owner()
577 if (owner) in mutex_can_spin_on_owner()
578 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); in mutex_can_spin_on_owner()
635 struct task_struct *owner; in mutex_optimistic_spin() local
638 owner = __mutex_trylock_or_owner(lock); in mutex_optimistic_spin()
639 if (!owner) in mutex_optimistic_spin()
646 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter)) in mutex_optimistic_spin()
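Lines 523-646 are the optimistic-spin machinery: before queueing itself to sleep, a contender keeps retrying the trylock and busy-waits as long as the current owner is running on a CPU, since a running owner is likely to release soon. A control-flow sketch reusing trylock_or_owner() and owner_task() from the earlier sketches; task_is_running() and should_resched() are hypothetical stand-ins for the kernel's owner->on_cpu / vcpu_is_preempted() and need_resched() checks:

/* Placeholder stand-ins for the kernel-side checks; here they always
 * say "keep spinning" so the shape of the loop stays visible. */
static bool task_is_running(struct task *t) { (void)t; return true; }
static bool should_resched(void)            { return false; }

/* Returns true if the lock was acquired while spinning, false if the
 * caller should give up and block on the wait list instead. */
static bool optimistic_spin(struct sketch_mutex *lock, struct task *curr)
{
        for (;;) {
                struct task *owner = trylock_or_owner(lock, curr);

                if (!owner)
                        return true;    /* got the lock without sleeping */

                /* Spin while the same owner still holds the lock... */
                while (owner_task(atomic_load_explicit(&lock->owner,
                                memory_order_relaxed)) == owner) {
                        /* ...but only while it runs and we need not yield. */
                        if (!task_is_running(owner) || should_resched())
                                return false;
                }
        }
}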
1195 unsigned long owner; in __mutex_unlock_slowpath() local
1206 owner = atomic_long_read(&lock->owner); in __mutex_unlock_slowpath()
1211 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current); in __mutex_unlock_slowpath()
1212 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP); in __mutex_unlock_slowpath()
1215 if (owner & MUTEX_FLAG_HANDOFF) in __mutex_unlock_slowpath()
1218 old = atomic_long_cmpxchg_release(&lock->owner, owner, in __mutex_unlock_slowpath()
1219 __owner_flags(owner)); in __mutex_unlock_slowpath()
1220 if (old == owner) { in __mutex_unlock_slowpath()
1221 if (owner & MUTEX_FLAG_WAITERS) in __mutex_unlock_slowpath()
1227 owner = old; in __mutex_unlock_slowpath()
1244 if (owner & MUTEX_FLAG_HANDOFF) in __mutex_unlock_slowpath()
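The __mutex_unlock_slowpath() lines close the loop: the releasing owner clears its task pointer with a release cmpxchg while leaving the flag bits in place, so the WAITERS bit survives and tells it to wake a waiter afterwards; if a waiter requested HANDOFF, the owner word is not cleared here at all and the lock is passed on via the handoff path instead. A sketch of that release step on the userspace types, leaving the wake-up and the handoff() call to the caller:

/* Release the lock on the slow path.  Returns the owner word that was
 * observed, so the caller can check WAITERS (wake someone up) or
 * HANDOFF (hand the lock over instead of having released it). */
static unsigned long unlock_slowpath_release(struct sketch_mutex *lock)
{
        unsigned long owner = atomic_load_explicit(&lock->owner,
                                                   memory_order_relaxed);

        for (;;) {
                if (owner & MUTEX_FLAG_HANDOFF)
                        return owner;   /* keep ownership; caller does handoff() */

                /* Drop the task pointer, keep the flag bits. */
                if (atomic_compare_exchange_weak_explicit(&lock->owner, &owner,
                                owner_flags(owner),
                                memory_order_release, memory_order_relaxed))
                        return owner;   /* caller wakes a waiter if WAITERS set */
        }
}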