Lines matching "lock"

1 // SPDX-License-Identifier: GPL-2.0-only
3 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
7 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
17 * See Documentation/locking/rt-mutex-design.rst for details.
27 #include <trace/events/lock.h>
36 struct rt_mutex *lock, in __ww_mutex_add_waiter() argument
42 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, in __ww_mutex_check_waiters() argument
47 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, in ww_mutex_lock_acquired() argument
52 static inline int __ww_mutex_check_kill(struct rt_mutex *lock, in __ww_mutex_check_kill() argument
66 * lock->owner state tracking:
68 * lock->owner holds the task_struct pointer of the owner. Bit 0
69 * is used to keep track of the "lock has waiters" state.
72 * NULL 0 lock is free (fast acquire possible)
73 * NULL 1 lock is free and has waiters and the top waiter
74 * is going to take the lock*
75 * taskpointer 0 lock is held (fast release possible)
76 * taskpointer 1 lock is held and has waiters**
79 * possible when bit 0 of lock->owner is 0.
81 * (*) It also can be a transitional state when grabbing the lock
82 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
83 * we need to set bit 0 before looking at the lock, and the owner may be
87 * waiters. This can happen when grabbing the lock in the slow path.
88 * To prevent a cmpxchg of the owner releasing the lock, we need to
89 * set this bit before looking at the lock.
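For context, the owner field is decoded by masking off the low bit; a minimal sketch of that helper (it lives in rtmutex_common.h rather than among the matched lines), assuming RT_MUTEX_HAS_WAITERS is bit 0 as described above:

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
        unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

        /* Strip the "has waiters" bit to recover the task_struct pointer. */
        return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}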
93 rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_set_owner() argument
97 if (rt_mutex_has_waiters(lock)) in rt_mutex_set_owner()
100 WRITE_ONCE(lock->owner, (struct task_struct *)val); in rt_mutex_set_owner()
103 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) in clear_rt_mutex_waiters() argument
105 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
106 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
109 static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock) in fixup_rt_mutex_waiters() argument
111 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
113 if (rt_mutex_has_waiters(lock)) in fixup_rt_mutex_waiters()
118 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
122 * l->owner=T1 in fixup_rt_mutex_waiters()
124 * lock(l->lock) in fixup_rt_mutex_waiters()
125 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
128 * unlock(l->lock) in fixup_rt_mutex_waiters()
132 * lock(l->lock) in fixup_rt_mutex_waiters()
133 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
136 * unlock(l->lock) in fixup_rt_mutex_waiters()
138 * signal(->T2) signal(->T3) in fixup_rt_mutex_waiters()
139 * lock(l->lock) in fixup_rt_mutex_waiters()
142 * unlock(l->lock) in fixup_rt_mutex_waiters()
143 * lock(l->lock) in fixup_rt_mutex_waiters()
147 * unlock(l->lock) in fixup_rt_mutex_waiters()
148 * lock(l->lock) in fixup_rt_mutex_waiters()
151 * l->owner = owner in fixup_rt_mutex_waiters()
152 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
153 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
155 * lock(l->lock) in fixup_rt_mutex_waiters()
158 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
159 * cmpxchg(l->owner, T1, NULL) in fixup_rt_mutex_waiters()
160 * ===> Success (l->owner = NULL) in fixup_rt_mutex_waiters()
162 * l->owner = owner in fixup_rt_mutex_waiters()
163 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
168 * serialized by l->lock, so nothing else can modify the waiters in fixup_rt_mutex_waiters()
169 * bit. If the bit is set then nothing can change l->owner either in fixup_rt_mutex_waiters()
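The fix the scenario above motivates is to clear a stale waiters bit only with plain loads and stores while holding wait_lock; a minimal sketch of how the function body continues (reconstructed, since only matched lines are listed):

        /*
         * wait_lock is held, so nobody can set the waiters bit behind our
         * back; a stale bit only needs a plain RMW to be cleared.
         */
        owner = READ_ONCE(*p);
        if (owner & RT_MUTEX_HAS_WAITERS)
                WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);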
184 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
188 return try_cmpxchg_acquire(&lock->owner, &old, new); in rt_mutex_cmpxchg_acquire()
191 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
195 return try_cmpxchg_release(&lock->owner, &old, new); in rt_mutex_cmpxchg_release()
199 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
200 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
203 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
205 unsigned long owner, *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
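The rest of the body is a relaxed RMW loop that ORs the waiters bit into the owner word, matching the "relaxed semantics suffice" note above; a minimal sketch:

        do {
                owner = *p;
        } while (cmpxchg_relaxed(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);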
216 * 2) Drop lock->wait_lock
217 * 3) Try to unlock the lock with cmpxchg
219 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
221 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
223 struct task_struct *owner = rt_mutex_owner(lock); in unlock_rt_mutex_safe()
225 clear_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
226 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
232 * lock(wait_lock); in unlock_rt_mutex_safe()
234 * mark_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
235 * acquire(lock); in unlock_rt_mutex_safe()
239 * lock(wait_lock); in unlock_rt_mutex_safe()
240 * mark_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
245 * lock(wait_lock); in unlock_rt_mutex_safe()
248 * lock(wait_lock); in unlock_rt_mutex_safe()
249 * acquire(lock); in unlock_rt_mutex_safe()
251 return rt_mutex_cmpxchg_release(lock, owner, NULL); in unlock_rt_mutex_safe()
255 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
263 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
270 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
272 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
273 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
277 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
279 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
281 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
283 lock->owner = NULL; in unlock_rt_mutex_safe()
284 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
291 int prio = task->prio; in __waiter_prio()
302 waiter->prio = __waiter_prio(task); in waiter_update_prio()
303 waiter->deadline = task->dl.deadline; in waiter_update_prio()
310 &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
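waiter->prio is normalized so that every non-realtime waiter compares equal; a minimal sketch of __waiter_prio(), which the lines above rely on:

static __always_inline int __waiter_prio(struct task_struct *task)
{
        int prio = task->prio;

        /* Treat all non-RT tasks as one priority level (DEFAULT_PRIO). */
        if (!rt_prio(prio))
                return DEFAULT_PRIO;

        return prio;
}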
315 if (left->prio < right->prio) in rt_mutex_waiter_less()
324 if (dl_prio(left->prio)) in rt_mutex_waiter_less()
325 return dl_time_before(left->deadline, right->deadline); in rt_mutex_waiter_less()
333 if (left->prio != right->prio) in rt_mutex_waiter_equal()
342 if (dl_prio(left->prio)) in rt_mutex_waiter_equal()
343 return left->deadline == right->deadline; in rt_mutex_waiter_equal()
359 if (rt_prio(waiter->prio) || dl_prio(waiter->prio)) in rt_mutex_steal()
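The rt_prio()/dl_prio() check above implements the lateral-steal policy: strictly higher-priority waiters may always steal, while same-priority (lateral) steals are limited to non-RT waiters on the spinlock build to avoid unbounded RT latency. A minimal sketch of rt_mutex_steal() around that check:

static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
                                  struct rt_mutex_waiter *top_waiter)
{
        if (rt_mutex_waiter_less(waiter, top_waiter))
                return true;

#ifdef RT_MUTEX_BUILD_SPINLOCKS
        /*
         * RT tasks are excluded from same priority (lateral) steals to
         * prevent the introduction of an unbounded latency.
         */
        if (rt_prio(waiter->prio) || dl_prio(waiter->prio))
                return false;

        return rt_mutex_waiter_equal(waiter, top_waiter);
#else
        return false;
#endif
}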
385 /* NOTE: relies on waiter->ww_ctx being set before insertion */ in __waiter_less()
386 if (aw->ww_ctx) { in __waiter_less()
387 if (!bw->ww_ctx) in __waiter_less()
390 return (signed long)(aw->ww_ctx->stamp - in __waiter_less()
391 bw->ww_ctx->stamp) < 0; in __waiter_less()
398 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
400 rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
404 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
406 if (RB_EMPTY_NODE(&waiter->tree_entry)) in rt_mutex_dequeue()
409 rb_erase_cached(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
410 RB_CLEAR_NODE(&waiter->tree_entry); in rt_mutex_dequeue()
425 rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less); in rt_mutex_enqueue_pi()
431 if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) in rt_mutex_dequeue_pi()
434 rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters); in rt_mutex_dequeue_pi()
435 RB_CLEAR_NODE(&waiter->pi_tree_entry); in rt_mutex_dequeue_pi()
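The pi_waiters tree sorts with the same priority comparison, just resolved through the pi_tree_entry node; a minimal sketch of the __pi_waiter_less comparator used above:

#define __node_2_pi_waiter(node) \
        rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)

static __always_inline bool
__pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{
        return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
}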
442 lockdep_assert_held(&p->pi_lock); in rt_mutex_adjust_prio()
445 pi_task = task_top_pi_waiter(p)->task; in rt_mutex_adjust_prio()
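Boosting and deboosting are delegated to the scheduler: the task's effective priority follows its highest-priority pi waiter, or reverts to its own when none is left. A minimal sketch of rt_mutex_adjust_prio() built from the fragments above:

static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
{
        struct task_struct *pi_task = NULL;

        lockdep_assert_held(&p->pi_lock);

        if (task_has_pi_waiters(p))
                pi_task = task_top_pi_waiter(p)->task;

        /* The scheduler (de)boosts @p to pi_task's priority, or resets it. */
        rt_mutex_setprio(p, pi_task);
}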
457 WARN_ON_ONCE(wqh->rtlock_task); in rt_mutex_wake_q_add_task()
459 wqh->rtlock_task = task; in rt_mutex_wake_q_add_task()
461 wake_q_add(&wqh->head, task); in rt_mutex_wake_q_add_task()
468 rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state); in rt_mutex_wake_q_add()
473 if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) { in rt_mutex_wake_up_q()
474 wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT); in rt_mutex_wake_up_q()
475 put_task_struct(wqh->rtlock_task); in rt_mutex_wake_up_q()
476 wqh->rtlock_task = NULL; in rt_mutex_wake_up_q()
479 if (!wake_q_empty(&wqh->head)) in rt_mutex_wake_up_q()
480 wake_up_q(&wqh->head); in rt_mutex_wake_up_q()
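wqh is the rtmutex-specific wake queue head: next to the regular wake_q it carries at most one TASK_RTLOCK_WAIT waiter, which is woken separately above. Its definition, sketched from rtmutex_common.h for reference:

struct rt_wake_q_head {
        struct wake_q_head      head;
        struct task_struct      *rtlock_task;
};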
510 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
515 * Decreases task's usage by one - may thus free the task.
525 * comparison to detect lock chain changes.
532 * Returns 0 or -EDEADLK.
537 * [P] task->pi_lock held
538 * [L] rtmutex->wait_lock held
554 * [1] lock(task->pi_lock); [R] acquire [P]
555 * [2] waiter = task->pi_blocked_on; [P]
557 * [4] lock = waiter->lock; [P]
558 * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
559 * unlock(task->pi_lock); release [P]
563 * [7] requeue_lock_waiter(lock, waiter); [P] + [L]
564 * [8] unlock(task->pi_lock); release [P]
567 * [10] task = owner(lock); [L]
569 * lock(task->pi_lock); [L] acquire [P]
570 * [11] requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
572 * [13] unlock(task->pi_lock); release [P]
573 * unlock(lock->wait_lock); release [L]
586 struct rt_mutex_base *lock; in rt_mutex_adjust_prio_chain() local
600 * We limit the lock chain length for each invocation. in rt_mutex_adjust_prio_chain()
611 printk(KERN_WARNING "Maximum lock depth %d reached " in rt_mutex_adjust_prio_chain()
613 top_task->comm, task_pid_nr(top_task)); in rt_mutex_adjust_prio_chain()
617 return -EDEADLK; in rt_mutex_adjust_prio_chain()
630 raw_spin_lock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
635 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
638 * [3] check_exit_conditions_1() protected by task->pi_lock. in rt_mutex_adjust_prio_chain()
651 * the previous owner of the lock might have released the lock. in rt_mutex_adjust_prio_chain()
658 * the task might have moved on in the lock chain or even left in rt_mutex_adjust_prio_chain()
659 * the chain completely and blocks now on an unrelated lock or in rt_mutex_adjust_prio_chain()
662 * We stored the lock on which @task was blocked in @next_lock, in rt_mutex_adjust_prio_chain()
665 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
669 * There could be 'spurious' loops in the lock graph due to ww_mutex, in rt_mutex_adjust_prio_chain()
676 * P3 should not return -EDEADLK because it gets trapped in the cycle in rt_mutex_adjust_prio_chain()
677 * created by P1 and P2 (which will resolve -- and runs into in rt_mutex_adjust_prio_chain()
690 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) in rt_mutex_adjust_prio_chain()
730 * [4] Get the next lock in rt_mutex_adjust_prio_chain()
732 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
734 * [5] We need to trylock here as we are holding task->pi_lock, in rt_mutex_adjust_prio_chain()
735 * which is the reverse lock order versus the other rtmutex in rt_mutex_adjust_prio_chain()
738 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
739 raw_spin_unlock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
745 * [6] check_exit_conditions_2() protected by task->pi_lock and in rt_mutex_adjust_prio_chain()
746 * lock->wait_lock. in rt_mutex_adjust_prio_chain()
748 * Deadlock detection. If the lock is the same as the original in rt_mutex_adjust_prio_chain()
749 * lock which caused us to walk the lock chain or if the in rt_mutex_adjust_prio_chain()
750 * current lock is owned by the task which initiated the chain in rt_mutex_adjust_prio_chain()
753 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { in rt_mutex_adjust_prio_chain()
754 ret = -EDEADLK; in rt_mutex_adjust_prio_chain()
759 * logic pick which of the contending threads gets -EDEADLK. in rt_mutex_adjust_prio_chain()
765 if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx) in rt_mutex_adjust_prio_chain()
768 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
773 * If we just follow the lock chain for deadlock detection, no in rt_mutex_adjust_prio_chain()
782 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
786 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
787 * If there is no owner of the lock, end of chain. in rt_mutex_adjust_prio_chain()
789 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
790 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
794 /* [10] Grab the next task, i.e. owner of @lock */ in rt_mutex_adjust_prio_chain()
795 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
796 raw_spin_lock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
808 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
811 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
812 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
822 * operation on @lock. We need it for the boost/deboost in rt_mutex_adjust_prio_chain()
825 prerequeue_top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
827 /* [7] Requeue the waiter in the lock waiter tree. */ in rt_mutex_adjust_prio_chain()
828 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
848 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
851 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
855 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
857 * We must abort the chain walk if there is no lock owner even in rt_mutex_adjust_prio_chain()
858 * in the deadlock detection case, as we have nothing to in rt_mutex_adjust_prio_chain()
861 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
865 * to get the lock. in rt_mutex_adjust_prio_chain()
867 if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) in rt_mutex_adjust_prio_chain()
868 wake_up_state(waiter->task, waiter->wake_state); in rt_mutex_adjust_prio_chain()
869 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
873 /* [10] Grab the next task, i.e. the owner of @lock */ in rt_mutex_adjust_prio_chain()
874 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
875 raw_spin_lock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
878 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
881 * waiter on the lock. Replace the previous top waiter in rt_mutex_adjust_prio_chain()
891 * The waiter was the top waiter on the lock, but is in rt_mutex_adjust_prio_chain()
901 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
912 * [12] check_exit_conditions_4() protected by task->pi_lock in rt_mutex_adjust_prio_chain()
913 * and lock->wait_lock. The actual decisions are made after we in rt_mutex_adjust_prio_chain()
916 * Check whether the task which owns the current lock is pi in rt_mutex_adjust_prio_chain()
917 * blocked itself. If yes we store a pointer to the lock for in rt_mutex_adjust_prio_chain()
918 * the lock chain change detection above. After we dropped in rt_mutex_adjust_prio_chain()
919 * task->pi_lock next_lock cannot be dereferenced anymore. in rt_mutex_adjust_prio_chain()
923 * Store the top waiter of @lock for the end of chain walk in rt_mutex_adjust_prio_chain()
926 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
929 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
930 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
936 * We reached the end of the lock chain. Stop right here. No in rt_mutex_adjust_prio_chain()
943 * If the current waiter is not the top waiter on the lock, in rt_mutex_adjust_prio_chain()
953 raw_spin_unlock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
961 * Try to take an rt-mutex
963 * Must be called with lock->wait_lock held and interrupts disabled
965 * @lock: The lock to be acquired.
966 * @task: The task which wants to acquire the lock
967 * @waiter: The waiter that is queued to the lock's wait tree if the
971 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
974 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
977 * Before testing whether we can acquire @lock, we set the in try_to_take_rt_mutex()
978 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all in try_to_take_rt_mutex()
979 * other tasks which try to modify @lock into the slow path in try_to_take_rt_mutex()
980 * and they serialize on @lock->wait_lock. in try_to_take_rt_mutex()
985 * - There is a lock owner. The caller must fixup the in try_to_take_rt_mutex()
986 * transient state if it does a trylock or leaves the lock in try_to_take_rt_mutex()
989 * - @task acquires the lock and there are no other in try_to_take_rt_mutex()
993 mark_rt_mutex_waiters(lock); in try_to_take_rt_mutex()
996 * If @lock has an owner, give up. in try_to_take_rt_mutex()
998 if (rt_mutex_owner(lock)) in try_to_take_rt_mutex()
1003 * into @lock waiter tree. If @waiter == NULL then this is a in try_to_take_rt_mutex()
1007 struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); in try_to_take_rt_mutex()
1010 * If waiter is the highest priority waiter of @lock, in try_to_take_rt_mutex()
1015 * We can acquire the lock. Remove the waiter from the in try_to_take_rt_mutex()
1016 * lock waiters tree. in try_to_take_rt_mutex()
1018 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
1024 * If the lock has waiters already we check whether @task is in try_to_take_rt_mutex()
1025 * eligible to take over the lock. in try_to_take_rt_mutex()
1028 * the lock. @task->pi_blocked_on is NULL, so it does in try_to_take_rt_mutex()
1031 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
1034 rt_mutex_top_waiter(lock))) in try_to_take_rt_mutex()
1039 * don't have to change anything in the lock in try_to_take_rt_mutex()
1044 * No waiters. Take the lock without the in try_to_take_rt_mutex()
1045 * pi_lock dance. @task->pi_blocked_on is NULL in try_to_take_rt_mutex()
1054 * Clear @task->pi_blocked_on. Requires protection by in try_to_take_rt_mutex()
1055 * @task->pi_lock. Redundant operation for the @waiter == NULL in try_to_take_rt_mutex()
1059 raw_spin_lock(&task->pi_lock); in try_to_take_rt_mutex()
1060 task->pi_blocked_on = NULL; in try_to_take_rt_mutex()
1062 * Finish the lock acquisition. @task is the new owner. If in try_to_take_rt_mutex()
1064 * waiter into @task->pi_waiters tree. in try_to_take_rt_mutex()
1066 if (rt_mutex_has_waiters(lock)) in try_to_take_rt_mutex()
1067 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
1068 raw_spin_unlock(&task->pi_lock); in try_to_take_rt_mutex()
1075 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
1081 * Task blocks on lock.
1085 * This must be called with lock->wait_lock held and interrupts disabled
1087 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, in task_blocks_on_rt_mutex() argument
1093 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex()
1098 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
1105 * the other will detect the deadlock and return -EDEADLOCK, in task_blocks_on_rt_mutex()
1113 return -EDEADLK; in task_blocks_on_rt_mutex()
1115 raw_spin_lock(&task->pi_lock); in task_blocks_on_rt_mutex()
1116 waiter->task = task; in task_blocks_on_rt_mutex()
1117 waiter->lock = lock; in task_blocks_on_rt_mutex()
1120 /* Get the top priority waiter on the lock */ in task_blocks_on_rt_mutex()
1121 if (rt_mutex_has_waiters(lock)) in task_blocks_on_rt_mutex()
1122 top_waiter = rt_mutex_top_waiter(lock); in task_blocks_on_rt_mutex()
1123 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
1125 task->pi_blocked_on = waiter; in task_blocks_on_rt_mutex()
1127 raw_spin_unlock(&task->pi_lock); in task_blocks_on_rt_mutex()
1133 rtm = container_of(lock, struct rt_mutex, rtmutex); in task_blocks_on_rt_mutex()
1136 raw_spin_lock(&task->pi_lock); in task_blocks_on_rt_mutex()
1137 rt_mutex_dequeue(lock, waiter); in task_blocks_on_rt_mutex()
1138 task->pi_blocked_on = NULL; in task_blocks_on_rt_mutex()
1139 raw_spin_unlock(&task->pi_lock); in task_blocks_on_rt_mutex()
1147 raw_spin_lock(&owner->pi_lock); in task_blocks_on_rt_mutex()
1148 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
1153 if (owner->pi_blocked_on) in task_blocks_on_rt_mutex()
1159 /* Store the lock on which owner is blocked or NULL */ in task_blocks_on_rt_mutex()
1162 raw_spin_unlock(&owner->pi_lock); in task_blocks_on_rt_mutex()
1172 * The owner can't disappear while holding a lock, in task_blocks_on_rt_mutex()
1178 raw_spin_unlock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1180 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, in task_blocks_on_rt_mutex()
1183 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1192 * Called with lock->wait_lock held and interrupts disabled.
1195 struct rt_mutex_base *lock) in mark_wakeup_next_waiter() argument
1199 raw_spin_lock(&current->pi_lock); in mark_wakeup_next_waiter()
1201 waiter = rt_mutex_top_waiter(lock); in mark_wakeup_next_waiter()
1204 * Remove it from current->pi_waiters and deboost. in mark_wakeup_next_waiter()
1207 * rt_mutex_setprio() to update p->pi_top_task before the in mark_wakeup_next_waiter()
1215 * queued on the lock until it gets the lock, this lock in mark_wakeup_next_waiter()
1219 * the top waiter can steal this lock. in mark_wakeup_next_waiter()
1221 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
1226 * p->pi_top_task pointer points to a blocked task). This however can in mark_wakeup_next_waiter()
1235 raw_spin_unlock(&current->pi_lock); in mark_wakeup_next_waiter()
1238 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) in __rt_mutex_slowtrylock() argument
1240 int ret = try_to_take_rt_mutex(lock, current, NULL); in __rt_mutex_slowtrylock()
1243 * try_to_take_rt_mutex() sets the lock waiters bit in __rt_mutex_slowtrylock()
1246 fixup_rt_mutex_waiters(lock); in __rt_mutex_slowtrylock()
1252 * Slow path try-lock function:
1254 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) in rt_mutex_slowtrylock() argument
1260 * If the lock already has an owner we fail to get the lock. in rt_mutex_slowtrylock()
1261 * This can be done without taking the @lock->wait_lock as in rt_mutex_slowtrylock()
1264 if (rt_mutex_owner(lock)) in rt_mutex_slowtrylock()
1268 * The mutex has currently no owner. Lock the wait lock and try to in rt_mutex_slowtrylock()
1269 * acquire the lock. We use irqsave here to support early boot calls. in rt_mutex_slowtrylock()
1271 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1273 ret = __rt_mutex_slowtrylock(lock); in rt_mutex_slowtrylock()
1275 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1280 static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) in __rt_mutex_trylock() argument
1282 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in __rt_mutex_trylock()
1285 return rt_mutex_slowtrylock(lock); in __rt_mutex_trylock()
1289 * Slow path to release an rt-mutex.
1291 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) in rt_mutex_slowunlock() argument
1297 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1299 debug_rt_mutex_unlock(lock); in rt_mutex_slowunlock()
1306 * foo->lock->owner = NULL; in rt_mutex_slowunlock()
1307 * rtmutex_lock(foo->lock); <- fast path in rt_mutex_slowunlock()
1308 * free = atomic_dec_and_test(foo->refcnt); in rt_mutex_slowunlock()
1309 * rtmutex_unlock(foo->lock); <- fast path in rt_mutex_slowunlock()
1312 * raw_spin_unlock(foo->lock->wait_lock); in rt_mutex_slowunlock()
1317 * lock->wait_lock. So we do the following sequence: in rt_mutex_slowunlock()
1319 * owner = rt_mutex_owner(lock); in rt_mutex_slowunlock()
1320 * clear_rt_mutex_waiters(lock); in rt_mutex_slowunlock()
1321 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1322 * if (cmpxchg(&lock->owner, owner, 0) == owner) in rt_mutex_slowunlock()
1327 * lock->owner is serialized by lock->wait_lock: in rt_mutex_slowunlock()
1329 * lock->owner = NULL; in rt_mutex_slowunlock()
1330 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1332 while (!rt_mutex_has_waiters(lock)) { in rt_mutex_slowunlock()
1333 /* Drops lock->wait_lock ! */ in rt_mutex_slowunlock()
1334 if (unlock_rt_mutex_safe(lock, flags) == true) in rt_mutex_slowunlock()
1337 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1346 mark_wakeup_next_waiter(&wqh, lock); in rt_mutex_slowunlock()
1347 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1352 static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) in __rt_mutex_unlock() argument
1354 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) in __rt_mutex_unlock()
1357 rt_mutex_slowunlock(lock); in __rt_mutex_unlock()
1361 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
1370 if (owner != rt_mutex_owner(lock)) in rtmutex_spin_on_owner()
1374 * the lock owner still matches @owner. If that fails, in rtmutex_spin_on_owner()
1381 * - the lock owner has been scheduled out in rtmutex_spin_on_owner()
1382 * - current is no longer the top waiter in rtmutex_spin_on_owner()
1383 * - current is requested to reschedule (redundant in rtmutex_spin_on_owner()
1385 * - the VCPU on which owner runs is preempted in rtmutex_spin_on_owner()
1388 !rt_mutex_waiter_is_top_waiter(lock, waiter)) { in rtmutex_spin_on_owner()
1398 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
1409 * - rtmutex, futex on all kernels
1410 * - mutex and rwsem substitutions on RT kernels
1414 * Remove a waiter from a lock and give up
1416 * Must be called with lock->wait_lock held and interrupts disabled. It must
1419 static void __sched remove_waiter(struct rt_mutex_base *lock, in remove_waiter() argument
1422 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1423 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter()
1426 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1428 raw_spin_lock(&current->pi_lock); in remove_waiter()
1429 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1430 current->pi_blocked_on = NULL; in remove_waiter()
1431 raw_spin_unlock(&current->pi_lock); in remove_waiter()
1435 * waiter of the lock and there is an owner to update. in remove_waiter()
1440 raw_spin_lock(&owner->pi_lock); in remove_waiter()
1444 if (rt_mutex_has_waiters(lock)) in remove_waiter()
1445 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); in remove_waiter()
1449 /* Store the lock on which owner is blocked or NULL */ in remove_waiter()
1452 raw_spin_unlock(&owner->pi_lock); in remove_waiter()
1464 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1466 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, in remove_waiter()
1469 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
1473 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
1474 * @lock: the rt_mutex to take
1478 * @timeout: the pre-initialized and started timer, or NULL for none
1479 * @waiter: the pre-initialized rt_mutex_waiter
1481 * Must be called with lock->wait_lock held and interrupts disabled
1483 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, in rt_mutex_slowlock_block() argument
1489 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in rt_mutex_slowlock_block()
1494 /* Try to acquire the lock: */ in rt_mutex_slowlock_block()
1495 if (try_to_take_rt_mutex(lock, current, waiter)) in rt_mutex_slowlock_block()
1498 if (timeout && !timeout->task) { in rt_mutex_slowlock_block()
1499 ret = -ETIMEDOUT; in rt_mutex_slowlock_block()
1503 ret = -EINTR; in rt_mutex_slowlock_block()
1513 if (waiter == rt_mutex_top_waiter(lock)) in rt_mutex_slowlock_block()
1514 owner = rt_mutex_owner(lock); in rt_mutex_slowlock_block()
1517 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
1519 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) in rt_mutex_slowlock_block()
1522 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
1534 * If the result is not -EDEADLOCK or the caller requested in rt_mutex_handle_deadlock()
1537 if (res != -EDEADLOCK || detect_deadlock) in rt_mutex_handle_deadlock()
1540 if (build_ww_mutex() && w->ww_ctx) in rt_mutex_handle_deadlock()
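If the deadlock involves a non-ww lock and the caller did not ask for detection, the task has nowhere to go; a sketch of how the function ends (reconstructed, only matched lines are listed above):

        /*
         * Yell loudly and stop the task right here.
         */
        WARN(1, "rtmutex deadlock detected\n");
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }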
1554 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
1555 * @lock: The rtmutex to block lock
1561 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, in __rt_mutex_slowlock() argument
1567 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in __rt_mutex_slowlock()
1571 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_slowlock()
1573 /* Try to acquire the lock again: */ in __rt_mutex_slowlock()
1574 if (try_to_take_rt_mutex(lock, current, NULL)) { in __rt_mutex_slowlock()
1584 trace_contention_begin(lock, LCB_F_RT); in __rt_mutex_slowlock()
1586 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk); in __rt_mutex_slowlock()
1588 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter); in __rt_mutex_slowlock()
1591 /* acquired the lock */ in __rt_mutex_slowlock()
1593 if (!ww_ctx->is_wait_die) in __rt_mutex_slowlock()
1599 remove_waiter(lock, waiter); in __rt_mutex_slowlock()
1607 fixup_rt_mutex_waiters(lock); in __rt_mutex_slowlock()
1609 trace_contention_end(lock, ret); in __rt_mutex_slowlock()
1614 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, in __rt_mutex_slowlock_locked() argument
1624 ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, in __rt_mutex_slowlock_locked()
1632 * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
1633 * @lock: The rtmutex to block lock
1637 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, in rt_mutex_slowlock() argument
1648 * rtmutex with lock->wait_lock held. But we cannot unconditionally in rt_mutex_slowlock()
1652 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1653 ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state); in rt_mutex_slowlock()
1654 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowlock()
1659 static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, in __rt_mutex_lock() argument
1662 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in __rt_mutex_lock()
1665 return rt_mutex_slowlock(lock, NULL, state); in __rt_mutex_lock()
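For reference, a hypothetical caller of the public rt_mutex API that sits on top of this fast/slow path split (example_lock and example_critical_section are made up for illustration):

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(example_lock);           /* hypothetical lock */

static void example_critical_section(void)      /* hypothetical caller */
{
        rt_mutex_lock(&example_lock);   /* uncontended: cmpxchg owner NULL -> current */
        /* ... owner may be boosted here if higher-prio waiters block ... */
        rt_mutex_unlock(&example_lock); /* uncontended: cmpxchg owner current -> NULL */
}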
1675 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
1676 * @lock: The underlying RT mutex
1678 static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) in rtlock_slowlock_locked() argument
1683 lockdep_assert_held(&lock->wait_lock); in rtlock_slowlock_locked()
1685 if (try_to_take_rt_mutex(lock, current, NULL)) in rtlock_slowlock_locked()
1693 trace_contention_begin(lock, LCB_F_RT); in rtlock_slowlock_locked()
1695 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK); in rtlock_slowlock_locked()
1698 /* Try to acquire the lock again */ in rtlock_slowlock_locked()
1699 if (try_to_take_rt_mutex(lock, current, &waiter)) in rtlock_slowlock_locked()
1702 if (&waiter == rt_mutex_top_waiter(lock)) in rtlock_slowlock_locked()
1703 owner = rt_mutex_owner(lock); in rtlock_slowlock_locked()
1706 raw_spin_unlock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1708 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) in rtlock_slowlock_locked()
1711 raw_spin_lock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1722 fixup_rt_mutex_waiters(lock); in rtlock_slowlock_locked()
1725 trace_contention_end(lock, 0); in rtlock_slowlock_locked()
1728 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) in rtlock_slowlock() argument
1732 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rtlock_slowlock()
1733 rtlock_slowlock_locked(lock); in rtlock_slowlock()
1734 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rtlock_slowlock()