Lines matching "full:lock" in kernel/locking/rtmutex_api.c (Linux kernel source)

17   * Debug aware fast / slowpath lock, trylock, unlock
22  static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
29          mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
30          ret = __rt_mutex_lock(&lock->rtmutex, state);
32                  mutex_release(&lock->dep_map, _RET_IP_);
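
The matches above come from the shared debug-aware lock helper. A minimal reconstruction of the whole helper, assuming the usual shape of this fast/slowpath wrapper (the might_sleep() check and the if (ret) guard are inferred from the gaps in the matched line numbers, not shown in the listing):

static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
                                                  unsigned int state,
                                                  unsigned int subclass)
{
        int ret;

        might_sleep();          /* assumption: sleeping-lock check, typical here */

        /* Tell lockdep we are about to acquire the lock. */
        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        ret = __rt_mutex_lock(&lock->rtmutex, state);
        if (ret)                /* on failure (e.g. signal), retract the acquire */
                mutex_release(&lock->dep_map, _RET_IP_);
        return ret;
}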
44   * rt_mutex_lock_nested - lock a rt_mutex
46   * @lock: the rt_mutex to be locked
49  void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
51          __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
58   * rt_mutex_lock - lock a rt_mutex
60   * @lock: the rt_mutex to be locked
62  void __sched rt_mutex_lock(struct rt_mutex *lock)
64          __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
70   * rt_mutex_lock_interruptible - lock a rt_mutex, interruptible by signals
72   * @lock: the rt_mutex to be locked
78  int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
80          return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
85   * rt_mutex_trylock - try to lock a rt_mutex
87   * @lock: the rt_mutex to be locked
96  int __sched rt_mutex_trylock(struct rt_mutex *lock)
103         ret = __rt_mutex_trylock(&lock->rtmutex);
105                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
114  * @lock: the rt_mutex to be unlocked
116 void __sched rt_mutex_unlock(struct rt_mutex *lock)
118         mutex_release(&lock->dep_map, _RET_IP_);
119         __rt_mutex_unlock(&lock->rtmutex);
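
The four entry points above form the public rt_mutex API. A minimal usage sketch, assuming an illustrative lock my_rtlock and caller (neither appears in the source):

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(my_rtlock);      /* illustrative lock, not from the source */

static int my_critical_section(void)
{
        int ret;

        /* Opportunistic attempt first: returns 1 on success, 0 if contended. */
        if (!rt_mutex_trylock(&my_rtlock)) {
                /* Sleep until acquired; a signal aborts the wait with -EINTR. */
                ret = rt_mutex_lock_interruptible(&my_rtlock);
                if (ret)
                        return ret;
        }
        /* ... critical section, may sleep ... */
        rt_mutex_unlock(&my_rtlock);
        return 0;
}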
126 int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
128         return rt_mutex_slowtrylock(lock);
131 int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
133         return __rt_mutex_slowtrylock(lock);
140  * @lock: The rt_mutex to be unlocked
141  * @wqh: The wake queue head from which to get the next lock waiter
143 bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
146         lockdep_assert_held(&lock->wait_lock);
148         debug_rt_mutex_unlock(lock);
150         if (!rt_mutex_has_waiters(lock)) {
151                 lock->owner = NULL;
161         mark_wakeup_next_waiter(wqh, lock);
166 void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
172         raw_spin_lock_irqsave(&lock->wait_lock, flags);
173         postunlock = __rt_mutex_futex_unlock(lock, &wqh);
174         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
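
The matches for rt_mutex_futex_unlock() skip its declarations and tail. A hedged reconstruction of the full function, assuming the conventional DEFINE_RT_WAKE_Q()/rt_mutex_postunlock() pattern for waking the next waiter after wait_lock is dropped:

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
        DEFINE_RT_WAKE_Q(wqh);  /* assumption: local wake queue, elided from the matches */
        unsigned long flags;
        bool postunlock;

        raw_spin_lock_irqsave(&lock->wait_lock, flags);
        postunlock = __rt_mutex_futex_unlock(lock, &wqh);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        if (postunlock)         /* assumption: wake the next waiter outside wait_lock */
                rt_mutex_postunlock(&wqh);
}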
183  * @lock: The rt_mutex to be initialized
184  * @name: The lock name used for debugging
185  * @key: The lock class key used for debugging
191 void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
194         debug_check_no_locks_freed((void *)lock, sizeof(*lock));
195         __rt_mutex_base_init(&lock->rtmutex);
196         lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
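
__rt_mutex_init() is normally reached through the rt_mutex_init() wrapper macro, which supplies the debug name and a static lock_class_key. A short sketch under that assumption (my_device and cmd_lock are illustrative names, not from the source):

#include <linux/rtmutex.h>

struct my_device {                      /* illustrative container */
        struct rt_mutex cmd_lock;
};

static void my_device_setup(struct my_device *dev)
{
        /*
         * rt_mutex_init() expands to __rt_mutex_init() with a name and a
         * static lock_class_key, so lockdep can classify the lock.
         */
        rt_mutex_init(&dev->cmd_lock);
}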
201  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
204  * @lock: the rt_mutex to be locked
214 void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
219         __rt_mutex_base_init(lock);
223          * some of the futex functions invoke spin_unlock(&hb->lock) with
226          * the spinlock is based, which makes lockdep notice a lock
229         lockdep_set_class(&lock->wait_lock, &pi_futex_key);
230         rt_mutex_set_owner(lock, proxy_owner);
234  * rt_mutex_proxy_unlock - release a lock on behalf of owner
236  * @lock: the rt_mutex to be unlocked
245 void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
247         debug_rt_mutex_proxy_unlock(lock);
248         rt_mutex_set_owner(lock, NULL);
252  * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
253  * @lock: the rt_mutex to take
264  *  0 - task blocked on lock
265  *  1 - acquired the lock for task, caller should wake it up
270 int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
276         lockdep_assert_held(&lock->wait_lock);
278         if (try_to_take_rt_mutex(lock, task, NULL))
282         ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
285         if (ret && !rt_mutex_owner(lock)) {
289                  * released the lock while we were walking the
299  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
300  * @lock: the rt_mutex to take
311  *  0 - task blocked on lock
312  *  1 - acquired the lock for task, caller should wake it up
317 int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
323         raw_spin_lock_irq(&lock->wait_lock);
324         ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
326                 remove_waiter(lock, waiter);
327         raw_spin_unlock_irq(&lock->wait_lock);
333  * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
334  * @lock: the rt_mutex we were woken on
339  * Wait for the lock acquisition started on our behalf by
349 int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
355         raw_spin_lock_irq(&lock->wait_lock);
358         ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
363         fixup_rt_mutex_waiters(lock);
364         raw_spin_unlock_irq(&lock->wait_lock);
370  * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
371  * @lock: the rt_mutex we were woken on
377  * Unless we acquired the lock, we're still enqueued on the wait-list and can
384  *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
389 bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
394         raw_spin_lock_irq(&lock->wait_lock);
396          * Do an unconditional try-lock, this deals with the lock stealing
402          * we will own the lock and it will have removed the waiter. If we
406         try_to_take_rt_mutex(lock, current, waiter);
411         if (rt_mutex_owner(lock) != current) {
412                 remove_waiter(lock, waiter);
419         fixup_rt_mutex_waiters(lock);
421         raw_spin_unlock_irq(&lock->wait_lock);
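
Taken together, the proxy-lock entry points implement lock acquisition on another task's behalf, as used by futex requeue-PI. A hedged sketch of the call sequence (proxy_lock_flow and its locals are illustrative, not from the source; in the real futex code the two halves run in different tasks, the requeueing waker and the waiter; error handling is abbreviated):

/* Conceptual flow only: these helpers are kernel-internal (rtmutex_common.h). */
static int proxy_lock_flow(struct rt_mutex_base *pi_mutex,
                           struct rt_mutex_waiter *waiter,
                           struct task_struct *task,
                           struct hrtimer_sleeper *to)
{
        int ret;

        /* Requeueing side: enqueue @task as a waiter, or take the lock for it. */
        ret = rt_mutex_start_proxy_lock(pi_mutex, waiter, task);
        if (ret == 1)
                return 0;       /* acquired on task's behalf; caller wakes it */
        if (ret < 0)
                return ret;     /* e.g. -EDEADLK from the PI chain walk */

        /* Waiting side, in @task: block until acquired, signalled or timed out. */
        ret = rt_mutex_wait_proxy_lock(pi_mutex, to, waiter);
        if (ret && rt_mutex_cleanup_proxy_lock(pi_mutex, waiter))
                return ret;     /* cleanup confirmed the failure; waiter removed */

        return 0;               /* we own the lock anyway; disregard ret */
}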
444         next_lock = waiter->lock;
480 static __always_inline int __mutex_lock_common(struct mutex *lock,
489         mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
490         ret = __rt_mutex_lock(&lock->rtmutex, state);
492                 mutex_release(&lock->dep_map, ip);
494                 lock_acquired(&lock->dep_map, ip);
499 void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
501         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
505 void __sched _mutex_lock_nest_lock(struct mutex *lock,
508         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
512 int __sched mutex_lock_interruptible_nested(struct mutex *lock,
515         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
519 int __sched mutex_lock_killable_nested(struct mutex *lock,
522         return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
526 void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
533         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
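
The _nested and _nest_lock variants exist purely for lockdep: the subclass tells lockdep that taking two locks of the same class together is intentional. A minimal sketch, assuming an illustrative struct my_obj with an embedded mutex:

#include <linux/mutex.h>

struct my_obj {                         /* illustrative, not from the source */
        struct mutex lock;
};

static void lock_pair(struct my_obj *a, struct my_obj *b)
{
        /* Impose a stable order so the two tasks cannot deadlock ... */
        if (a > b) {
                struct my_obj *tmp = a;
                a = b;
                b = tmp;
        }
        mutex_lock(&a->lock);
        /* ... then tell lockdep the second same-class acquisition is intentional. */
        mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        /* ... both objects locked ... */
        mutex_unlock(&b->lock);
        mutex_unlock(&a->lock);
}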
540 void __sched mutex_lock(struct mutex *lock)
542         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
546 int __sched mutex_lock_interruptible(struct mutex *lock)
548         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
552 int __sched mutex_lock_killable(struct mutex *lock)
554         return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
558 void __sched mutex_lock_io(struct mutex *lock)
562         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
568 int __sched mutex_trylock(struct mutex *lock)
575         ret = __rt_mutex_trylock(&lock->rtmutex);
577                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
583 void __sched mutex_unlock(struct mutex *lock)
585         mutex_release(&lock->dep_map, _RET_IP_);
586         __rt_mutex_unlock(&lock->rtmutex);
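
On PREEMPT_RT these wrappers give the regular mutex API an rtmutex-backed, priority-inheriting implementation, so callers do not change. A minimal caller sketch (cfg_lock and update_config() are illustrative names, not from the source):

#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_lock);          /* illustrative */

static int update_config(void)
{
        /* Same API as !PREEMPT_RT; returns -EINTR if a signal interrupts the wait. */
        if (mutex_lock_interruptible(&cfg_lock))
                return -EINTR;
        /* ... modify configuration, may sleep ... */
        mutex_unlock(&cfg_lock);
        return 0;
}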