// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
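
/*
 * Illustrative sketch of the owner-word encoding (the address below is
 * hypothetical): a lock owned by the task at 0xffff888012345600 with a
 * non-empty wait list stores 0xffff888012345601 in ->owner, so:
 *
 *	owner = atomic_long_read(&lock->owner);
 *	task  = (struct task_struct *)(owner & ~MUTEX_FLAGS); // 0xffff888012345600
 *	flags = owner & MUTEX_FLAGS;                          // MUTEX_FLAG_WAITERS
 */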

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF and preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
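
/*
 * Illustrative usage sketch (not part of this file; 'example_lock' and
 * 'example_count' are hypothetical): a typical caller serializes access
 * to shared data with a lock/unlock pair in process context:
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static int example_count;
 *
 *	static void example_inc(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_count++;		// protected by example_lock
 *		mutex_unlock(&example_lock);
 *	}
 */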
#endif

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To handle lock holder preemption, skip spinning if the owner task
	 * is not running on a CPU or its CPU has been preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
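
/*
 * Illustrative, simplified sketch of the wait/wound acquire pattern
 * (hypothetical 'example_class', 'a' and 'b'): locks taken under one
 * acquire context back off on -EDEADLK and are released together before
 * the context is finished; a full caller would loop on further -EDEADLK:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &example_class);
 *	ww_mutex_lock(&a->lock, &ctx);
 *	if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);	// sleep until available
 *		ww_mutex_lock(&a->lock, &ctx);		// then retry
 *	}
 *	ww_acquire_done(&ctx);
 *	// ... use a and b ...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 */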

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF is set, in which case we must not clear the
	 * owner field, but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
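
/*
 * Illustrative caller sketch (hypothetical 'example_lock'): unlike
 * mutex_lock(), the return value must be checked, since a signal can
 * abort the wait before the lock is held:
 *
 *	if (mutex_lock_interruptible(&example_lock))
 *		return -ERESTARTSYS;	// interrupted; lock NOT held
 *	// ... critical section ...
 *	mutex_unlock(&example_lock);
 */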

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
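
/*
 * Illustrative caller sketch (hypothetical 'example_lock'): per the
 * spin_trylock() convention above, nonzero means the lock IS held:
 *
 *	if (mutex_trylock(&example_lock)) {
 *		// ... lock held, do quick work ...
 *		mutex_unlock(&example_lock);
 *	} else {
 *		// contended; fall back without blocking
 *	}
 */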

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter which we are to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return: 1, holding @lock, if the decrement reached 0; 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
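
/*
 * Illustrative caller sketch (hypothetical 'example_refcount' and
 * 'example_lock'): the common use is tearing down an object on its
 * final reference while holding the lock that guards lookups:
 *
 *	if (atomic_dec_and_mutex_lock(&example_refcount, &example_lock)) {
 *		// last reference dropped; example_lock is held
 *		// ... remove object from lookup structures, free it ...
 *		mutex_unlock(&example_lock);
 *	}
 */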