/* Matched lines for "lock" in kernel/locking/mutex.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *   from the -rt tree, where it was originally implemented for rtmutexes
 *   by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *   and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */

#include <trace/events/lock.h>
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
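/*
 * A minimal usage sketch (not from mutex.c): the two usual ways to
 * initialize a mutex. DEFINE_MUTEX() defines and initializes a static
 * mutex; mutex_init() is the runtime initializer, which expands to
 * __mutex_init() with a lockdep class key. "demo_dev"/"demo_dev_setup"
 * are hypothetical names.
 */
static DEFINE_MUTEX(demo_static_lock);

struct demo_dev {
	struct mutex lock;
};

static void demo_dev_setup(struct demo_dev *dev)
{
	mutex_init(&dev->lock);	/* never memset() or re-init a held mutex */
}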
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
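/*
 * For reference, the flag values that live in those low bits of ->owner,
 * and the helpers mutex.c uses to split a raw owner word into its task
 * pointer and flags halves. These match the upstream definitions.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}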
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
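/*
 * Usage sketch (not from mutex.c): mutex_is_locked() is mostly useful
 * for assertions. For a stronger check that the *caller* holds the
 * lock, prefer lockdep_assert_held(). "demo" names are hypothetical.
 */
static void demo_update_locked(struct demo_dev *dev)
{
	WARN_ON_ONCE(!mutex_is_locked(&dev->lock));
	/* ... modify state protected by dev->lock ... */
}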
/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, task, flags;

	owner = atomic_long_read(&lock->owner);
	for (;;) {
		/* ... decide whether we may take or be handed the lock ... */
		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == (unsigned long)current)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}
/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}
/*
 * Give up ownership to a specific task; when @task = NULL, this is
 * equivalent to a regular unlock.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		/* ... new = @task plus WAITERS and, if @task != NULL, PICKUP ... */

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
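/*
 * Usage sketch (not from mutex.c): a sleepable critical section
 * protecting a shared list. All "demo" names are hypothetical.
 */
struct demo_node {
	struct list_head entry;
};

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_list_lock);	/* protects demo_list */

static void demo_add(struct demo_node *node)
{
	mutex_lock(&demo_list_lock);	/* may sleep; process context only */
	list_add_tail(&node->entry, &demo_list);
	mutex_unlock(&demo_list_lock);	/* same task must unlock */
}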
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}
static inline bool
ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		       struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined, only
	 * by acquiring wait_lock there is a guarantee that
	 * they are not invalid when reading. ...
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * ... We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/* Similarly, stop spinning if we are no longer the first waiter. */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. We already
		 * disabled preemption, which is equal to the RCU read-side
		 * critical section here, so the task_struct won't go away
		 * during the spinning period.
		 */
		barrier();

		/* Use vcpu_is_preempted() to detect lock holder preemption. */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	/*
	 * We already disabled preemption, which is equal to the RCU read-side
	 * critical section here, so the task_struct won't go away during the
	 * spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * so that we trylock in the spin path, which is faster than the
	 * blocking slow path.
	 */
	return retval;
}
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * mutex_can_spin_on_owner() avoids the osq_(un)lock() overhead
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call it in that case.
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/* Take the MCS (queued) lock first before spinning on the owner field. */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);
	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);
fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after obtaining the mutex.
	 */
	if (need_resched())
		schedule_preempt_disabled();
	return false;
}

#else /* !CONFIG_MUTEX_SPIN_ON_OWNER */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously. Must not
 * be used in interrupt context.
 */
void __sched mutex_unlock(struct mutex *lock)
{
	if (__mutex_unlock_fast(lock))
		return;

	__mutex_unlock_slowpath(lock, _RET_IP_);
}
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
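/*
 * Usage sketch (not from mutex.c): acquiring two w/w mutexes with
 * deadlock backoff. On -EDEADLK the younger context drops what it
 * holds and retries taking the contended lock first. "demo_ww_class",
 * "demo_lock_pair" and the swap() retry here are illustrative; see
 * Documentation/locking/ww-mutex-design.rst for the full patterns,
 * including the ww_mutex_lock_slow() optimization.
 */
static DEFINE_WW_CLASS(demo_ww_class);

static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_ww_class);
retry:
	/* The first lock in a context cannot return -EDEADLK. */
	ww_mutex_lock(a, &ctx);
	if (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		ww_mutex_unlock(a);
		swap(a, b);		/* take the contended lock first */
		goto retry;
	}
	ww_acquire_done(&ctx);

	/* ... critical section covering both objects ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}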
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	/* ... */
	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		/* ... */
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/* After waiting to acquire the wait_lock, try again: */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);
		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	/* ... */
	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/* Add in stamp order, waking up waiters that must kill themselves: */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);
	/* ... */

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);
	/* ... */
	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}
static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}
/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is
 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/* Reset the wounded flag after a kill; see __mutex_lock_common(). */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
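/*
 * Usage sketch (not from mutex.c): taking two locks of the same class,
 * e.g. for a hypothetical object-to-object transfer. Order by address
 * to avoid real ABBA deadlocks, and annotate the second acquisition as
 * a nested subclass so lockdep does not see a false self-deadlock.
 */
static void demo_transfer(struct demo_dev *a, struct demo_dev *b)
{
	if (a > b)
		swap(a, b);

	mutex_lock(&a->lock);
	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);

	/* ... move state from a to b ... */

	mutex_unlock(&b->lock);
	mutex_unlock(&a->lock);
}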
void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned int tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		/* ... grow the interval, saturating at UINT_MAX ... */
		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		/* ... on HANDOFF keep the owner field; otherwise drop it, keeping the flags ... */
		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;
			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
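/*
 * Usage sketch (not from mutex.c): the common call pattern in a
 * hypothetical file-operation handler; a signal during the wait
 * surfaces as -ERESTARTSYS so the syscall can be restarted.
 */
static ssize_t demo_read(struct demo_dev *dev, char __user *buf, size_t len)
{
	ssize_t ret;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	ret = 0;
	/* ... copy data out under dev->lock ... */

	mutex_unlock(&dev->lock);
	return ret;
}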
/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
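/*
 * Usage sketch (not from mutex.c): mutex_lock_killable() suits paths
 * where only a fatal signal should abort the wait. Returning -EINTR
 * is safe here because the task is dying and userspace will never see
 * the value. "demo" names are hypothetical.
 */
static int demo_flush(struct demo_dev *dev)
{
	if (mutex_lock_killable(&dev->lock))
		return -EINTR;

	/* ... write back dev state ... */

	mutex_unlock(&dev->lock);
	return 0;
}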
/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention. Must not be
 * used in interrupt context.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
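/*
 * Usage sketch (not from mutex.c): opportunistic work that must not
 * block. Note the spin_trylock()-style convention: nonzero means
 * "acquired". "demo" names are hypothetical.
 */
struct demo_cache {
	struct mutex lock;
	/* ... */
};

static void demo_shrink(struct demo_cache *cache)
{
	if (!mutex_trylock(&cache->lock))
		return;		/* somebody else holds it; try again later */

	/* ... trim the cache ... */

	mutex_unlock(&cache->lock);
}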
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return: true (with the lock held) if the count reached 0, false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
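/*
 * Usage sketch (not from mutex.c): the classic refcount-teardown
 * pattern this helper exists for, reusing demo_list_lock from the
 * earlier sketch. Only the final put takes the lock, and it returns
 * with the lock held so the object can be unlinked before anyone else
 * can look it up. "demo_obj"/"demo_put" are hypothetical names.
 */
struct demo_obj {
	atomic_t refcnt;
	struct list_head entry;
};

static void demo_put(struct demo_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &demo_list_lock))
		return;			/* not the last reference */

	/* Final put: demo_list_lock is held, unlink then free. */
	list_del(&obj->entry);
	mutex_unlock(&demo_list_lock);
	kfree(obj);
}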