Lines Matching +full:foo +full:- +full:queue
1 // SPDX-License-Identifier: GPL-2.0-only
3 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
7 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
17 * See Documentation/locking/rt-mutex-design.rst for details.
64 * lock->owner state tracking:
66 * lock->owner holds the task_struct pointer of the owner. Bit 0
77 * possible when bit 0 of lock->owner is 0.
80 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
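As a rough user-space model of this encoding (illustrative only, not the kernel code: HAS_WAITERS_BIT, tagged_owner, struct task and the helper names are all made up here), the owner field is one atomic word whose low bit doubles as the "has waiters" flag, so owner and flag are always read and written together:

#include <stdatomic.h>
#include <stdint.h>

#define HAS_WAITERS_BIT	0x1UL			/* models RT_MUTEX_HAS_WAITERS */

struct task;					/* stand-in for struct task_struct */

static _Atomic uintptr_t tagged_owner;		/* owner pointer | waiters bit */

static struct task *owner_task(uintptr_t val)
{
	/* task pointers are word aligned, so bit 0 is free for the flag */
	return (struct task *)(val & ~HAS_WAITERS_BIT);
}

static int has_waiters(uintptr_t val)
{
	return val & HAS_WAITERS_BIT;
}

static void set_owner(struct task *t, int waiters)
{
	uintptr_t val = (uintptr_t)t | (waiters ? HAS_WAITERS_BIT : 0);

	atomic_store_explicit(&tagged_owner, val, memory_order_relaxed);
}

Because owner and flag live in the same word, the fast-path compare-exchange shown further down can refuse to fire whenever the flag is set.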
98 WRITE_ONCE(lock->owner, (struct task_struct *)val); in rt_mutex_set_owner()
103 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
104 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
109 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
116 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
120 * l->owner=T1 in fixup_rt_mutex_waiters()
122 * lock(l->lock) in fixup_rt_mutex_waiters()
123 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
126 * unlock(l->lock) in fixup_rt_mutex_waiters()
130 * lock(l->lock) in fixup_rt_mutex_waiters()
131 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
134 * unlock(l->lock) in fixup_rt_mutex_waiters()
136 * signal(->T2) signal(->T3) in fixup_rt_mutex_waiters()
137 * lock(l->lock) in fixup_rt_mutex_waiters()
140 * unlock(l->lock) in fixup_rt_mutex_waiters()
141 * lock(l->lock) in fixup_rt_mutex_waiters()
145 * unlock(l->lock) in fixup_rt_mutex_waiters()
146 * lock(l->lock) in fixup_rt_mutex_waiters()
149 * l->owner = owner in fixup_rt_mutex_waiters()
150 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
151 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
153 * lock(l->lock) in fixup_rt_mutex_waiters()
156 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
157 * cmpxchg(l->owner, T1, NULL) in fixup_rt_mutex_waiters()
158 * ===> Success (l->owner = NULL) in fixup_rt_mutex_waiters()
160 * l->owner = owner in fixup_rt_mutex_waiters()
161 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
166 * serialized by l->lock, so nothing else can modify the waiters in fixup_rt_mutex_waiters()
167 * bit. If the bit is set then nothing can change l->owner either in fixup_rt_mutex_waiters()
186 return try_cmpxchg_acquire(&lock->owner, &old, new); in rt_mutex_cmpxchg_acquire()
193 return try_cmpxchg_release(&lock->owner, &old, new); in rt_mutex_cmpxchg_release()
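A minimal user-space sketch of the fast path these two helpers enable, using C11 atomics instead of the kernel's try_cmpxchg wrappers (fast_lock/fast_unlock and the bare owner pointer, without the waiters bit, are simplifications):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct task;
static _Atomic(struct task *) owner;	/* models lock->owner, waiters bit omitted */

static bool fast_lock(struct task *me)
{
	struct task *expected = NULL;

	/* acquire on success, pairs with the release in fast_unlock() */
	return atomic_compare_exchange_strong_explicit(&owner, &expected, me,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static bool fast_unlock(struct task *me)
{
	struct task *expected = me;

	/* on failure (e.g. a waiter queued) the caller takes the slow path */
	return atomic_compare_exchange_strong_explicit(&owner, &expected, NULL,
						       memory_order_release,
						       memory_order_relaxed);
}

Acquire ordering on lock and release ordering on unlock give the usual critical-section semantics; any failure simply pushes the caller into the slow path under ->wait_lock.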
197 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
203 unsigned long owner, *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
214 * 2) Drop lock->wait_lock
219 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
224 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
270 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
271 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
275 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
279 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
281 lock->owner = NULL; in unlock_rt_mutex_safe()
282 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
289 int prio = task->prio; in __waiter_prio()
300 waiter->prio = __waiter_prio(task); in waiter_update_prio()
301 waiter->deadline = task->dl.deadline; in waiter_update_prio()
308 &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
313 if (left->prio < right->prio) in rt_mutex_waiter_less()
322 if (dl_prio(left->prio)) in rt_mutex_waiter_less()
323 return dl_time_before(left->deadline, right->deadline); in rt_mutex_waiter_less()
331 if (left->prio != right->prio) in rt_mutex_waiter_equal()
340 if (dl_prio(left->prio)) in rt_mutex_waiter_equal()
341 return left->deadline == right->deadline; in rt_mutex_waiter_equal()
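A hedged sketch of the ordering rule these two helpers implement (simplified user-space C with illustrative names; in the kernel, deadline tasks have a prio below MAX_DL_PRIO and the deadline comparison is wraparound safe via dl_time_before()): a lower prio value wins, and only when both waiters are in the deadline class does the earlier absolute deadline break the tie.

#include <stdbool.h>
#include <stdint.h>

#define MAX_DL_PRIO	0		/* deadline class sorts below all RT prios */

struct waiter_model {
	int	 prio;			/* lower value == higher priority, -1 for deadline tasks */
	uint64_t deadline;		/* absolute deadline, deadline class only */
};

static bool dl_class(const struct waiter_model *w)
{
	return w->prio < MAX_DL_PRIO;
}

/* true if 'left' must be served before 'right' */
static bool waiter_less_model(const struct waiter_model *left,
			      const struct waiter_model *right)
{
	if (left->prio != right->prio)
		return left->prio < right->prio;

	/* same class; for deadline tasks the earlier deadline wins */
	if (dl_class(left))
		return left->deadline < right->deadline;

	return false;
}

/* equality used to decide whether a requeue/boost is actually needed */
static bool waiter_equal_model(const struct waiter_model *left,
			       const struct waiter_model *right)
{
	if (left->prio != right->prio)
		return false;

	return dl_class(left) ? left->deadline == right->deadline : true;
}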
357 if (rt_prio(waiter->prio) || dl_prio(waiter->prio)) in rt_mutex_steal()
383 /* NOTE: relies on waiter->ww_ctx being set before insertion */ in __waiter_less()
384 if (aw->ww_ctx) { in __waiter_less()
385 if (!bw->ww_ctx) in __waiter_less()
388 return (signed long)(aw->ww_ctx->stamp - in __waiter_less()
389 bw->ww_ctx->stamp) < 0; in __waiter_less()
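The stamp comparison at the end of __waiter_less() is worth calling out: casting the unsigned difference to a signed type keeps "older stamp first" correct even after the counter wraps. A tiny standalone model (ww_ctx_model and ww_older are illustrative names):

#include <stdbool.h>

struct ww_ctx_model {
	unsigned long stamp;	/* monotonically increasing acquire ticket */
};

/* true if 'a' is the older (and therefore favoured) context */
static bool ww_older(const struct ww_ctx_model *a, const struct ww_ctx_model *b)
{
	return (long)(a->stamp - b->stamp) < 0;
}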
398 rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
404 if (RB_EMPTY_NODE(&waiter->tree_entry)) in rt_mutex_dequeue()
407 rb_erase_cached(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
408 RB_CLEAR_NODE(&waiter->tree_entry); in rt_mutex_dequeue()
423 rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less); in rt_mutex_enqueue_pi()
429 if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) in rt_mutex_dequeue_pi()
432 rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters); in rt_mutex_dequeue_pi()
433 RB_CLEAR_NODE(&waiter->pi_tree_entry); in rt_mutex_dequeue_pi()
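The enqueue/dequeue pairs above follow the usual rb_root_cached pattern; a simplified kernel-style sketch of that pattern (not the exact rtmutex code, and the sketch_* names are made up) looks like this:

static bool sketch_less(struct rb_node *a, const struct rb_node *b)
{
	struct rt_mutex_waiter *aw = rb_entry(a, struct rt_mutex_waiter, tree_entry);
	struct rt_mutex_waiter *bw = rb_entry(b, struct rt_mutex_waiter, tree_entry);

	return rt_mutex_waiter_less(aw, bw);
}

static void sketch_enqueue(struct rb_root_cached *tree, struct rt_mutex_waiter *w)
{
	/* keeps the leftmost (highest priority) waiter cached for O(1) lookup */
	rb_add_cached(&w->tree_entry, tree, sketch_less);
}

static void sketch_dequeue(struct rb_root_cached *tree, struct rt_mutex_waiter *w)
{
	/* a waiter may be dequeued at most once; RB_EMPTY_NODE guards reentry */
	if (RB_EMPTY_NODE(&w->tree_entry))
		return;

	rb_erase_cached(&w->tree_entry, tree);
	RB_CLEAR_NODE(&w->tree_entry);
}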
440 lockdep_assert_held(&p->pi_lock); in rt_mutex_adjust_prio()
443 pi_task = task_top_pi_waiter(p)->task; in rt_mutex_adjust_prio()
452 if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) { in rt_mutex_wake_q_add()
454 WARN_ON_ONCE(wqh->rtlock_task); in rt_mutex_wake_q_add()
455 get_task_struct(w->task); in rt_mutex_wake_q_add()
456 wqh->rtlock_task = w->task; in rt_mutex_wake_q_add()
458 wake_q_add(&wqh->head, w->task); in rt_mutex_wake_q_add()
464 if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) { in rt_mutex_wake_up_q()
465 wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT); in rt_mutex_wake_up_q()
466 put_task_struct(wqh->rtlock_task); in rt_mutex_wake_up_q()
467 wqh->rtlock_task = NULL; in rt_mutex_wake_up_q()
470 if (!wake_q_empty(&wqh->head)) in rt_mutex_wake_up_q()
471 wake_up_q(&wqh->head); in rt_mutex_wake_up_q()
501 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
506 * Decreases task's usage by one - may thus free the task.
523 * Returns 0 or -EDEADLK.
528 * [P] task->pi_lock held
529 * [L] rtmutex->wait_lock held
545 * [1] lock(task->pi_lock); [R] acquire [P]
546 * [2] waiter = task->pi_blocked_on; [P]
548 * [4] lock = waiter->lock; [P]
549 * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
550 * unlock(task->pi_lock); release [P]
555 * [8] unlock(task->pi_lock); release [P]
560 * lock(task->pi_lock); [L] acquire [P]
563 * [13] unlock(task->pi_lock); release [P]
564 * unlock(lock->wait_lock); release [L]
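Step [5] in the scheme above is the classic lock-ordering back-off: the walk already holds task->pi_lock, while the normal nesting order takes lock->wait_lock first, so only a trylock is permitted and a failure means dropping everything and starting over. A minimal user-space model of that pattern (pthread spinlocks, illustrative names, initialization with pthread_spin_init() assumed):

#include <pthread.h>

static pthread_spinlock_t pi_lock;	/* models task->pi_lock   */
static pthread_spinlock_t wait_lock;	/* models lock->wait_lock */

static void chain_step(void)
{
retry:
	pthread_spin_lock(&pi_lock);

	/* reverse of the usual nesting order, so only a trylock is safe */
	if (pthread_spin_trylock(&wait_lock)) {
		/* contended: back off completely and let the other side run */
		pthread_spin_unlock(&pi_lock);
		goto retry;
	}

	/* ... both locks held: examine the waiter and requeue as needed ... */

	pthread_spin_unlock(&wait_lock);
	pthread_spin_unlock(&pi_lock);
}

Backing off instead of spinning with pi_lock held is what keeps the walk deadlock free against the fast acquire and release paths.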
604 top_task->comm, task_pid_nr(top_task)); in rt_mutex_adjust_prio_chain()
608 return -EDEADLK; in rt_mutex_adjust_prio_chain()
621 raw_spin_lock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
626 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
629 * [3] check_exit_conditions_1() protected by task->pi_lock. in rt_mutex_adjust_prio_chain()
656 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
667 * P3 should not return -EDEADLK because it gets trapped in the cycle in rt_mutex_adjust_prio_chain()
668 * created by P1 and P2 (which will resolve -- and runs into in rt_mutex_adjust_prio_chain()
681 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) in rt_mutex_adjust_prio_chain()
723 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
725 * [5] We need to trylock here as we are holding task->pi_lock, in rt_mutex_adjust_prio_chain()
729 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
730 raw_spin_unlock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
736 * [6] check_exit_conditions_2() protected by task->pi_lock and in rt_mutex_adjust_prio_chain()
737 * lock->wait_lock. in rt_mutex_adjust_prio_chain()
745 ret = -EDEADLK; in rt_mutex_adjust_prio_chain()
750 * logic pick which of the contending threads gets -EDEADLK. in rt_mutex_adjust_prio_chain()
756 if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx) in rt_mutex_adjust_prio_chain()
759 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
773 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
777 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
781 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
787 raw_spin_lock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
802 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
803 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
842 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
846 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
859 wake_up_state(waiter->task, waiter->wake_state); in rt_mutex_adjust_prio_chain()
860 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
866 raw_spin_lock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
903 * [12] check_exit_conditions_4() protected by task->pi_lock in rt_mutex_adjust_prio_chain()
904 * and lock->wait_lock. The actual decisions are made after we in rt_mutex_adjust_prio_chain()
910 * task->pi_lock next_lock cannot be dereferenced anymore. in rt_mutex_adjust_prio_chain()
920 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
921 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
944 raw_spin_unlock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
952 * Try to take an rt-mutex
954 * Must be called with lock->wait_lock held and interrupts disabled
965 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
969 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all in try_to_take_rt_mutex()
971 * and they serialize on @lock->wait_lock. in try_to_take_rt_mutex()
976 * - There is a lock owner. The caller must fixup the in try_to_take_rt_mutex()
980 * - @task acquires the lock and there are no other in try_to_take_rt_mutex()
1019 * the lock. @task->pi_blocked_on is NULL, so it does in try_to_take_rt_mutex()
1036 * pi_lock dance. @task->pi_blocked_on is NULL in try_to_take_rt_mutex()
1045 * Clear @task->pi_blocked_on. Requires protection by in try_to_take_rt_mutex()
1046 * @task->pi_lock. Redundant operation for the @waiter == NULL in try_to_take_rt_mutex()
1050 raw_spin_lock(&task->pi_lock); in try_to_take_rt_mutex()
1051 task->pi_blocked_on = NULL; in try_to_take_rt_mutex()
1055 * waiter into @task->pi_waiters tree. in try_to_take_rt_mutex()
1059 raw_spin_unlock(&task->pi_lock); in try_to_take_rt_mutex()
1076 * This must be called with lock->wait_lock held and interrupts disabled
1089 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
1096 * the other will detect the deadlock and return -EDEADLOCK, in task_blocks_on_rt_mutex()
1101 return -EDEADLK; in task_blocks_on_rt_mutex()
1103 raw_spin_lock(&task->pi_lock); in task_blocks_on_rt_mutex()
1104 waiter->task = task; in task_blocks_on_rt_mutex()
1105 waiter->lock = lock; in task_blocks_on_rt_mutex()
1113 task->pi_blocked_on = waiter; in task_blocks_on_rt_mutex()
1115 raw_spin_unlock(&task->pi_lock); in task_blocks_on_rt_mutex()
1124 raw_spin_lock(&task->pi_lock); in task_blocks_on_rt_mutex()
1126 task->pi_blocked_on = NULL; in task_blocks_on_rt_mutex()
1127 raw_spin_unlock(&task->pi_lock); in task_blocks_on_rt_mutex()
1135 raw_spin_lock(&owner->pi_lock); in task_blocks_on_rt_mutex()
1141 if (owner->pi_blocked_on) in task_blocks_on_rt_mutex()
1150 raw_spin_unlock(&owner->pi_lock); in task_blocks_on_rt_mutex()
1166 raw_spin_unlock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1171 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1178 * queue it up.
1180 * Called with lock->wait_lock held and interrupts disabled.
1187 raw_spin_lock(¤t->pi_lock); in mark_wakeup_next_waiter()
1192 * Remove it from current->pi_waiters and deboost. in mark_wakeup_next_waiter()
1195 * rt_mutex_setprio() to update p->pi_top_task before the in mark_wakeup_next_waiter()
1209 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
1214 * p->pi_top_task pointer points to a blocked task). This however can in mark_wakeup_next_waiter()
1223 raw_spin_unlock(¤t->pi_lock); in mark_wakeup_next_waiter()
1240 * Slow path try-lock function:
1249 * This can be done without taking the @lock->wait_lock as in rt_mutex_slowtrylock()
1259 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1263 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1277 * Slow path to release an rt-mutex.
1285 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1294 * foo->lock->owner = NULL; in rt_mutex_slowunlock()
1295 * rtmutex_lock(foo->lock); <- fast path in rt_mutex_slowunlock()
1296 * free = atomic_dec_and_test(foo->refcnt); in rt_mutex_slowunlock()
1297 * rtmutex_unlock(foo->lock); <- fast path in rt_mutex_slowunlock()
1299 * kfree(foo); in rt_mutex_slowunlock()
1300 * raw_spin_unlock(foo->lock->wait_lock); in rt_mutex_slowunlock()
1305 * lock->wait_lock. So we do the following sequence: in rt_mutex_slowunlock()
1309 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1310 * if (cmpxchg(&lock->owner, owner, 0) == owner) in rt_mutex_slowunlock()
1315 * lock->owner is serialized by lock->wait_lock: in rt_mutex_slowunlock()
1317 * lock->owner = NULL; in rt_mutex_slowunlock()
1318 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1321 /* Drops lock->wait_lock ! */ in rt_mutex_slowunlock()
1325 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1332 * Queue the next waiter for wakeup once we release the wait_lock. in rt_mutex_slowunlock()
1335 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
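The sequence the comments above describe can be modelled in user space (illustrative only: wait_lock as a pthread spinlock, the owner word as a C11 atomic with the waiters flag in bit 0, and unlock_safe() is a made-up name). The owner word is cleared only after wait_lock has been dropped, and the compare-exchange reports whether a waiter showed up in the meantime:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define HAS_WAITERS	0x1UL		/* models RT_MUTEX_HAS_WAITERS */

struct model_lock {
	pthread_spinlock_t	wait_lock;
	_Atomic uintptr_t	owner;	/* task pointer | HAS_WAITERS */
};

/*
 * Returns true if the lock was released here; false means a waiter set
 * HAS_WAITERS in the meantime and the caller must retake wait_lock and
 * hand the lock over explicitly.
 */
static bool unlock_safe(struct model_lock *lock, uintptr_t me)
{
	uintptr_t expected = me;

	/*
	 * Drop wait_lock while we are still the owner: nobody can acquire,
	 * release and free the lock until the owner word is cleared, so the
	 * compare-exchange below is the very last access we make.
	 */
	pthread_spin_unlock(&lock->wait_lock);

	return atomic_compare_exchange_strong(&lock->owner, &expected, 0);
}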
1369 * - the lock owner has been scheduled out in rtmutex_spin_on_owner()
1370 * - current is no longer the top waiter in rtmutex_spin_on_owner()
1371 * - current is requested to reschedule (redundant in rtmutex_spin_on_owner()
1373 * - the VCPU on which owner runs is preempted in rtmutex_spin_on_owner()
1375 if (!owner->on_cpu || need_resched() || in rtmutex_spin_on_owner()
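Put together, the spin-wait policy those bullets describe looks roughly like the loop below. This is a hedged reconstruction, not the verbatim function: top_waiter_check() stands in for whichever helper the real code uses, and the kernel additionally checks vcpu_is_preempted() for the virtualized case.

/* Simplified sketch of the adaptive-spin policy. */
static bool spin_on_owner_sketch(struct rt_mutex_base *lock,
				 struct rt_mutex_waiter *waiter,
				 struct task_struct *owner)
{
	bool keep_trying = true;

	rcu_read_lock();		/* keeps 'owner' from being freed under us */
	for (;;) {
		/* owner changed or released the lock: go retry the trylock */
		if (rt_mutex_owner(lock) != owner)
			break;

		/* any of the conditions above: stop spinning and block */
		if (!owner->on_cpu || need_resched() ||
		    !top_waiter_check(lock, waiter)) {	/* illustrative helper */
			keep_trying = false;
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();

	return keep_trying;
}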
1398 * - rtmutex, futex on all kernels
1399 * - mutex and rwsem substitutions on RT kernels
1405 * Must be called with lock->wait_lock held and interrupts disabled. It must
1415 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1417 raw_spin_lock(¤t->pi_lock); in remove_waiter()
1419 current->pi_blocked_on = NULL; in remove_waiter()
1420 raw_spin_unlock(¤t->pi_lock); in remove_waiter()
1429 raw_spin_lock(&owner->pi_lock); in remove_waiter()
1441 raw_spin_unlock(&owner->pi_lock); in remove_waiter()
1453 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1458 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
1462 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
1467 * @timeout: the pre-initialized and started timer, or NULL for none
1468 * @waiter: the pre-initialized rt_mutex_waiter
1470 * Must be called with lock->wait_lock held and interrupts disabled
1487 if (timeout && !timeout->task) { in rt_mutex_slowlock_block()
1488 ret = -ETIMEDOUT; in rt_mutex_slowlock_block()
1492 ret = -EINTR; in rt_mutex_slowlock_block()
1506 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
1511 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
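Assembled from the fragments above, the wait-wake-try-to-take loop has roughly the shape below. This is a hedged outline only: slowlock_block_sketch is a made-up name, and the real function additionally handles ww_mutex contexts and the adaptive spin before scheduling.

static int slowlock_block_sketch(struct rt_mutex_base *lock, unsigned int state,
				 struct hrtimer_sleeper *timeout,
				 struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/* timeout expired or a signal arrived: give up */
		if (timeout && !timeout->task) {
			ret = -ETIMEDOUT;
			break;
		}
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			break;
		}

		/* drop wait_lock around the actual sleep */
		raw_spin_unlock_irq(&lock->wait_lock);
		schedule();
		raw_spin_lock_irq(&lock->wait_lock);

		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}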
1523 * If the result is not -EDEADLOCK or the caller requested in rt_mutex_handle_deadlock()
1526 if (res != -EDEADLOCK || detect_deadlock) in rt_mutex_handle_deadlock()
1529 if (build_ww_mutex() && w->ww_ctx) in rt_mutex_handle_deadlock()
1543 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
1560 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_slowlock()
1580 if (!ww_ctx->is_wait_die) in __rt_mutex_slowlock()
1616 * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
1632 * rtmutex with lock->wait_lock held. But we cannot unconditionally in rt_mutex_slowlock()
1636 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1638 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowlock()
1659 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
1667 lockdep_assert_held(&lock->wait_lock); in rtlock_slowlock_locked()
1688 raw_spin_unlock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1693 raw_spin_lock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1712 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rtlock_slowlock()
1714 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rtlock_slowlock()