Searched +full:lock (results 1 – 25 of 1101), sorted by relevance.

/Linux-v6.1/kernel/locking/

spinlock_debug.c

    void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
            /* Make sure we are not reinitializing a held lock: */
            debug_check_no_locks_freed((void *)lock, sizeof(*lock));
            lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
            lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
            lock->magic = SPINLOCK_MAGIC;
            lock->owner = SPINLOCK_OWNER_INIT;
            lock->owner_cpu = -1;
    void __rwlock_init(rwlock_t *lock, const char *name,
            /* Make sure we are not reinitializing a held lock: */
    [all …]
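
On CONFIG_DEBUG_SPINLOCK kernels this initializer plants the magic/owner fields that every later acquire re-checks. A minimal usage sketch under that assumption (my_lock and my_init are hypothetical names):

    #include <linux/init.h>
    #include <linux/spinlock.h>

    static raw_spinlock_t my_lock;          /* hypothetical lock */

    static int __init my_init(void)
    {
            /* On debug builds this reaches __raw_spin_lock_init(), which
             * sets magic, owner and owner_cpu as in the excerpt above. */
            raw_spin_lock_init(&my_lock);

            raw_spin_lock(&my_lock);
            /* ... critical section ... */
            raw_spin_unlock(&my_lock);
            return 0;
    }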

rtmutex_api.c

    // SPDX-License-Identifier: GPL-2.0-only
    /* Debug aware fast / slowpath lock,trylock,unlock */
    static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
            mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
            ret = __rt_mutex_lock(&lock->rtmutex, state);
            mutex_release(&lock->dep_map, _RET_IP_);
    /*
     * rt_mutex_lock_nested - lock a rt_mutex
     * @lock: the rt_mutex to be locked
     */
    void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
            __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
    [all …]
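
rt_mutex_lock_nested() is the lockdep (CONFIG_DEBUG_LOCK_ALLOC) entry point; most users take the plain API. A minimal sketch, assuming a hypothetical my_rtm lock:

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(my_rtm);         /* hypothetical */

    static void touch_shared(void)
    {
            rt_mutex_lock(&my_rtm);         /* may boost a lower-prio owner (PI) */
            /* ... critical section ... */
            rt_mutex_unlock(&my_rtm);
    }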

mutex.c

    // SPDX-License-Identifier: GPL-2.0-only
    /*
     * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
     *   from the -rt tree, where it was originally implemented for rtmutexes
     *
     * Also see Documentation/locking/mutex-design.rst.
     */
    #include <trace/events/lock.h>
    __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
            atomic_long_set(&lock->owner, 0);
            raw_spin_lock_init(&lock->wait_lock);
            INIT_LIST_HEAD(&lock->wait_list);
            osq_lock_init(&lock->osq);
    [all …]
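
mutex_init() is a macro wrapping the __mutex_init() shown here, so that lockdep gets one static lock_class_key per init site. A minimal sketch of both init styles (struct foo and foo_setup are hypothetical):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(global_mutex);      /* compile-time initialization */

    struct foo {
            struct mutex lock;
            int a, b;
    };

    static void foo_setup(struct foo *f)
    {
            mutex_init(&f->lock);           /* runtime init -> __mutex_init() */
            mutex_lock(&f->lock);
            f->a = f->b = 0;
            mutex_unlock(&f->lock);
    }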

rtmutex.c

    // SPDX-License-Identifier: GPL-2.0-only
    /*
     * RT-Mutexes: simple blocking mutual exclusion locks with PI support
     *
     * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
     * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
     *
     * See Documentation/locking/rt-mutex-design.rst for details.
     */
    #include <trace/events/lock.h>
                                    struct rt_mutex *lock,  /* in __ww_mutex_add_waiter() */
    static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
    static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
    static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
    [all …]

spinlock.c

    // SPDX-License-Identifier: GPL-2.0
    /*
     * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
     *
     * If lockdep is enabled then we use the non-preemption spin-ops
     * not re-enabled during lock-acquire (which the preempt-spin-ops do):
     *
     * Some architectures can relax in favour of the CPU owning the lock.
     *
     * This could be a long-held lock. We both prepare to spin for a long
     * towards that other CPU that it should break the lock ASAP.
     */
    void __lockfunc __raw_##op##_lock(locktype##_t *lock)               \
            if (likely(do_raw_##op##_trylock(lock)))                    \
                    arch_##op##_relax(&lock->raw_lock);                 \
    [all …]
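
BUILD_LOCK_OPS stamps out one such slowpath per lock type. For op = spin, the generated function behaves roughly like the sketch below (simplified; the real macro also handles the CONFIG_GENERIC_LOCKBREAK case):

    void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
    {
            for (;;) {
                    preempt_disable();
                    if (likely(do_raw_spin_trylock(lock)))
                            break;                  /* acquired */
                    preempt_enable();               /* allow preemption while spinning */

                    arch_spin_relax(&lock->raw_lock);
            }
    }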

ww_mutex.h

    /* SPDX-License-Identifier: GPL-2.0-only */
    __ww_waiter_first(struct mutex *lock)
            w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
            if (list_entry_is_head(w, &lock->wait_list, list))
    __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
            if (list_entry_is_head(w, &lock->wait_list, list))
    __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
            if (list_entry_is_head(w, &lock->wait_list, list))
    __ww_waiter_last(struct mutex *lock)
            w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
    [all …]
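
These helpers iterate a mutex's wait_list so the wound/wait code can inspect waiters in either direction. From the caller's side, the usual ww_mutex pattern takes multiple locks under one acquire context and backs off on -EDEADLK; a condensed two-lock sketch, assuming a hypothetical my_ww_class (see Documentation/locking/ww-mutex-design.rst for the canonical version):

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(my_ww_class);    /* hypothetical class */

    static void update_pair(struct ww_mutex *a, struct ww_mutex *b)
    {
            struct ww_acquire_ctx ctx;
            int ret;

            ww_acquire_init(&ctx, &my_ww_class);

            ret = ww_mutex_lock(a, &ctx);   /* first lock: no deadlock possible */
            while (!ret && (ret = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
                    /* Wounded: drop what we hold, sleep-acquire the
                     * contended lock, then retry the other one. */
                    ww_mutex_unlock(a);
                    ww_mutex_lock_slow(b, &ctx);
                    swap(a, b);             /* the slow-locked one is now "a" */
                    ret = 0;
            }
            if (!ret) {
                    ww_acquire_done(&ctx);
                    /* ... touch the state both locks protect ... */
                    ww_mutex_unlock(b);
                    ww_mutex_unlock(a);
            }
            ww_acquire_fini(&ctx);
    }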

qrwlock.c

    // SPDX-License-Identifier: GPL-2.0-or-later
    /*
     * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
     */
    #include <trace/events/lock.h>
    /**
     * queued_read_lock_slowpath - acquire read lock of a queued rwlock
     * @lock: Pointer to queued rwlock structure
     */
    void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
            /*
             * Readers come here when they cannot get the lock without waiting
             *
             * Readers in interrupt context will get the lock immediately
             * if the writer is just waiting (not holding the lock yet),
             * so spin with ACQUIRE semantics until the lock is available
             */
    [all …]
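
The interrupt-context carve-out in the comment is what keeps the classic "reader in IRQ" pattern deadlock-free: an in-IRQ reader is granted the lock even while a writer is queued, provided the writer does not hold it yet. A sketch under that assumption (names hypothetical):

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(my_rwlock);        /* hypothetical */

    static irqreturn_t my_irq(int irq, void *dev)
    {
            read_lock(&my_rwlock);          /* granted even if a writer waits */
            /* ... read shared state ... */
            read_unlock(&my_rwlock);
            return IRQ_HANDLED;
    }

    static void writer_side(void)
    {
            write_lock_irq(&my_rwlock);     /* exclude the IRQ reader */
            /* ... modify shared state ... */
            write_unlock_irq(&my_rwlock);
    }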

qspinlock_paravirt.h

    /* SPDX-License-Identifier: GPL-2.0 */
    /*
     * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
     * pv_kick(cpu)             -- wakes a suspended vcpu
     *
     * not running. The one lock stealing attempt allowed at slowpath entry
     * mitigates the slight slowdown for non-overcommitted guest with this
     * aggressive wait-early mechanism.
     *
     * Hybrid PV queued/unfair lock
     *
     * it will be called once when a lock waiter enter the PV slowpath before
     * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
     * When that bit becomes visible to the incoming waiters, no lock stealing
     */
    [all …]
/Linux-v6.1/include/linux/

rwlock_api_smp.h

    void __lockfunc _raw_read_lock(rwlock_t *lock)          __acquires(lock);
    void __lockfunc _raw_write_lock(rwlock_t *lock)         __acquires(lock);
    void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
    void __lockfunc _raw_read_lock_bh(rwlock_t *lock)       __acquires(lock);
    void __lockfunc _raw_write_lock_bh(rwlock_t *lock)      __acquires(lock);
    void __lockfunc _raw_read_lock_irq(rwlock_t *lock)      __acquires(lock);
    void __lockfunc _raw_write_lock_irq(rwlock_t *lock)     __acquires(lock);
    unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
                                                            __acquires(lock);
    unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
    [all …]
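
Note that the _irqsave variants return the flags word instead of taking it by reference; the public read_lock_irqsave() macro assigns that return value, which is why callers pass a plain unsigned long. Usage sketch (hypothetical lock):

    static DEFINE_RWLOCK(stats_lock);       /* hypothetical */

    static void read_stats(void)
    {
            unsigned long flags;

            read_lock_irqsave(&stats_lock, flags);  /* flags assigned by the macro */
            /* ... */
            read_unlock_irqrestore(&stats_lock, flags);
    }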

spinlock_api_smp.h

    void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)    __acquires(lock);
    void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                            __acquires(lock);
    _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                            __acquires(lock);
    void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
    void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
                                                            __acquires(lock);
    unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
                                                            __acquires(lock);
    [all …]
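
Picking among these variants depends on the calling context; a sketch of the two common cases (hypothetical lock):

    static DEFINE_SPINLOCK(dev_lock);       /* hypothetical */

    static void from_process_context(void)
    {
            spin_lock_irq(&dev_lock);       /* IRQs known to be enabled here */
            /* ... */
            spin_unlock_irq(&dev_lock);
    }

    static void from_any_context(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev_lock, flags);    /* preserves IRQ state */
            /* ... */
            spin_unlock_irqrestore(&dev_lock, flags);
    }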

spinlock.h

    /* SPDX-License-Identifier: GPL-2.0 */
    /*
     * include/linux/spinlock.h - generic spinlock/rwlock declarations
     *
     * (also included on UP-debug builds:)
     * (which is an empty structure on non-debug builds)
     * builds. (which are NOPs on non-debug, non-preempt
     * (included on UP-non-debug builds:)
     */
    #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
    /*
     * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
     */
    extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    # define raw_spin_lock_init(lock) \
    [all …]
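
With lockdep enabled, raw_spin_lock_init() must mint one static lock_class_key per init site before calling the __raw_spin_lock_init() seen in spinlock_debug.c; its definition is roughly:

    # define raw_spin_lock_init(lock)                                   \
    do {                                                                \
            static struct lock_class_key __key;                         \
                                                                        \
            __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);  \
    } while (0)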

spinlock_rt.h

    // SPDX-License-Identifier: GPL-2.0-only
    extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
    static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
            rt_mutex_base_init(&(slock)->lock);     \
            rt_mutex_base_init(&(slock)->lock);     \
    extern void rt_spin_lock(spinlock_t *lock);
    extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
    extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
    extern void rt_spin_unlock(spinlock_t *lock);
    extern void rt_spin_lock_unlock(spinlock_t *lock);
    [all …]
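
These declarations are the PREEMPT_RT backend: there spinlock_t wraps an rt_mutex_base, and the ordinary spin_lock() API compiles down to rt_spin_lock(). Caller code is unchanged; a sketch (hypothetical lock):

    static DEFINE_SPINLOCK(my_lock);        /* rt_mutex-backed on PREEMPT_RT */

    static void touch(void)
    {
            spin_lock(&my_lock);            /* rt_spin_lock() on RT kernels */
            /* the critical section may sleep under contention on RT */
            spin_unlock(&my_lock);
    }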

rwlock.h

    extern void __rwlock_init(rwlock_t *lock, const char *name,
    # define rwlock_init(lock) \
            __rwlock_init((lock), #lock, &__key); \
    # define rwlock_init(lock) \
            do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
    extern int do_raw_read_trylock(rwlock_t *lock);
    extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
    extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
    extern int do_raw_write_trylock(rwlock_t *lock);
    [all …]

local_lock.h

    /* SPDX-License-Identifier: GPL-2.0 */
    /**
     * local_lock_init - Runtime initialize a lock instance
     */
    #define local_lock_init(lock)           __local_lock_init(lock)
    /**
     * local_lock - Acquire a per CPU local lock
     * @lock: The lock variable
     */
    #define local_lock(lock)                __local_lock(lock)
    /**
     * local_lock_irq - Acquire a per CPU local lock and disable interrupts
     * @lock: The lock variable
     */
    #define local_lock_irq(lock)            __local_lock_irq(lock)
    /**
     * local_lock_irqsave - Acquire a per CPU local lock, save and disable
    [all …]
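
The intended pattern embeds a local_lock_t in the per-CPU data it protects: on non-RT kernels acquiring it essentially disables preemption or interrupts plus lockdep annotations, while PREEMPT_RT substitutes a real lock. A sketch with hypothetical names:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct my_pcpu {                        /* hypothetical per-CPU data */
            local_lock_t lock;
            int counter;
    };

    static DEFINE_PER_CPU(struct my_pcpu, my_pcpu) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void bump(void)
    {
            local_lock(&my_pcpu.lock);      /* protects this CPU's instance */
            this_cpu_inc(my_pcpu.counter);
            local_unlock(&my_pcpu.lock);
    }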

lockdep.h

    /* SPDX-License-Identifier: GPL-2.0 */
    /* see Documentation/locking/lockdep-design.rst for more details. */
            to->class_cache[i] = NULL;      /* in lockdep_copy_map() */
    /*
     * Every lock has a list of other locks that were taken after it.
     */
    /* used by BFS to record whether "prev -> this" only has -(*R)-> */
    /*
     * The parent field is used to implement breadth-first search, and the
     * bit 0 is reused to indicate if the lock has been accessed in BFS.
     */
    /**
     * struct lock_chain - lock dependency chain record
     * @entry: the collided lock chains in lock_chain hash list
     */
    #define INITIAL_CHAIN_KEY -1
    [all …]
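
Besides recording dependencies, lockdep lets code assert its locking contract at the point of use; lockdep_assert_held() is the common form (struct foo here is hypothetical):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct foo {
            struct mutex lock;
            int a, b;
    };

    static void update_counters(struct foo *f)
    {
            /* Splats if the caller does not hold f->lock;
             * compiles away when lockdep is disabled. */
            lockdep_assert_held(&f->lock);

            f->a++;
            f->b++;
    }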

mutex.h

    /* SPDX-License-Identifier: GPL-2.0 */
    /*
     * - only one task can hold the mutex at a time
     * - only the owner can unlock the mutex
     * - multiple unlocks are not permitted
     * - recursive locking is not permitted
     * - a mutex object must be initialized via the API
     * - a mutex object must not be initialized via memset or copying
     * - task may not exit with mutex held
     * - memory areas where held locks reside must not be freed
     * - held mutexes must not be reinitialized
     */
    [all …]
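
Each rule above is backed by a debug check (CONFIG_DEBUG_MUTEXES / lockdep). A sketch of code that respects the init-via-API and no-freeing-held-locks rules (struct obj and the helpers are hypothetical):

    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct obj {
            struct mutex lock;
    };

    static struct obj *obj_create(void)
    {
            struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

            if (o)
                    mutex_init(&o->lock);   /* via the API, never memset/copy */
            return o;
    }

    static void obj_destroy(struct obj *o)
    {
            WARN_ON(mutex_is_locked(&o->lock));     /* never free a held lock */
            kfree(o);
    }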
/Linux-v6.1/drivers/gpu/drm/

drm_lock.c

    /**
     * Take the heavyweight lock.
     *
     * \param lock lock pointer.
     * \return one if the lock is held, or zero otherwise.
     *
     * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
     */
            volatile unsigned int *lock = &lock_data->hw_lock->lock;    /* in drm_lock_take() */
            spin_lock_bh(&lock_data->spinlock);
            old = *lock;
            ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
            prev = cmpxchg(lock, old, new);
            spin_unlock_bh(&lock_data->spinlock);
    [all …]
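
Stripped of the DRM bookkeeping, drm_lock_take() is a standard compare-and-swap loop: compute the desired value from a snapshot and retry if cmpxchg() reports that the word changed underneath. A hypothetical condensed sketch of that shape (_DRM_LOCK_HELD is the UAPI "held" flag; cmpxchg() comes via linux/atomic.h):

    #include <linux/atomic.h>

    static int try_take(unsigned int *lock, unsigned int context)
    {
            unsigned int old, new, prev;

            do {
                    old = *lock;
                    if (old & _DRM_LOCK_HELD)
                            return 0;               /* already held */
                    new = context | _DRM_LOCK_HELD;
                    prev = cmpxchg(lock, old, new);
            } while (prev != old);                  /* raced: retry */

            return 1;
    }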
/Linux-v6.1/fs/ocfs2/dlm/

dlmast.c

    // SPDX-License-Identifier: GPL-2.0-or-later
                                    struct dlm_lock *lock);
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
    /*
     * lock level will obsolete a pending bast.
     * For example, if dlm_thread queued a bast for an EX lock that
     * lock owner downconverted to NL, the bast is now obsolete.
     *
     * This is needed because the lock and convert paths can queue
     * asts out-of-band (not waiting for dlm_thread) in order to
     */
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
            assert_spin_locked(&dlm->ast_lock);
    [all …]

dlmlock.c

    // SPDX-License-Identifier: GPL-2.0-or-later
    /*
     * underlying calls for lock creation
     */
                                    struct dlm_lock *lock, int flags);
    static void dlm_lock_detach_lockres(struct dlm_lock *lock);
            return -ENOMEM;         /* in dlm_init_lock_cache() */
    /* Tell us whether we can grant a new lock request.
     * caller needs: res->spinlock
     *
     * returns: 1 if the lock can be granted, 0 otherwise.
     */
                                  struct dlm_lock *lock)    /* in dlm_can_grant_new_lock() */
            list_for_each_entry(tmplock, &res->granted, list) {
    [all …]
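
Grantability is a pure compatibility scan over the resource's queues: the request is grantable only if its mode is compatible with every granted (and, in the full version, converting) lock. A condensed sketch, assuming a helper in the style of dlm_lock_compatible():

    static int can_grant(struct dlm_lock_resource *res, struct dlm_lock *lock)
    {
            struct dlm_lock *tmplock;

            list_for_each_entry(tmplock, &res->granted, list) {
                    if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                            return 0;       /* conflicts with a granted lock */
            }
            return 1;
    }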

dlmconvert.c

    // SPDX-License-Identifier: GPL-2.0-or-later
    /*
     * underlying calls for lock conversion
     *
     * needs a spinlock held on entry (res->spinlock) and it is the
     * only one that holds a lock on exit (res->spinlock).
     */
                                  struct dlm_lock *lock, int flags,
                                  struct dlm_lock *lock, int flags, int type);
    /*
     * taken: takes and drops res->spinlock
     */
                           struct dlm_lock *lock, int flags, int type)  /* in dlmconvert_master() */
            spin_lock(&res->spinlock);
            res->state |= DLM_LOCK_RES_IN_PROGRESS;
    [all …]
/Linux-v6.1/drivers/md/persistent-data/

dm-block-manager.c

    #include "dm-block-manager.h"
    #include "dm-persistent-data-internal.h"
    #include <linux/dm-bufio.h>
    #include <linux/device-mapper.h>

    /*----------------------------------------------------------------*/

    /* trace is also emitted for the previous lock acquisition. */
            spinlock_t lock;        /* member of struct block_lock */
    static unsigned __find_holder(struct block_lock *lock,
            if (lock->holders[i] == task)
    /* call this *after* you increment lock->count */
    [all …]
/Linux-v6.1/include/asm-generic/

qrwlock.h

    /* SPDX-License-Identifier: GPL-2.0-or-later */
    /*
     * Queue read/write lock
     *
     * asm-generic/spinlock.h meets these requirements.
     *
     * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
     */
    #include <asm-generic/qrwlock_types.h>
    #define _QW_LOCKED      0x0ff   /* A writer holds the lock */
    extern void queued_read_lock_slowpath(struct qrwlock *lock);
    extern void queued_write_lock_slowpath(struct qrwlock *lock);
    /**
     * queued_read_trylock - try to acquire read lock of a queued rwlock
     * @lock : Pointer to queued rwlock structure
     */
    [all …]
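
The whole lock is one atomic word: writer state lives in the low byte (_QW_LOCKED plus a waiting bit, masked by _QW_WMASK) and the reader count is biased in above it by _QR_BIAS. The reader fast path is roughly the v6.1 implementation below:

    static inline int queued_read_trylock(struct qrwlock *lock)
    {
            int cnts;

            cnts = atomic_read(&lock->cnts);
            if (likely(!(cnts & _QW_WMASK))) {      /* no writer active/waiting */
                    cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
                    if (likely(!(cnts & _QW_WMASK)))
                            return 1;               /* read lock acquired */
                    /* a writer appeared: undo our reader increment */
                    atomic_sub(_QR_BIAS, &lock->cnts);
            }
            return 0;
    }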
/Linux-v6.1/Documentation/locking/

lockdep-design.rst

    Lock-class
    ----------

    tens of thousands of) instantiations. For example a lock in the inode
    lock class.

    The validator tracks the 'usage state' of lock-classes, and it tracks
    the dependencies between different lock-classes. Lock usage indicates
    how a lock is used with regard to its IRQ contexts, while lock
    dependency can be understood as lock order, where L1 -> L2 suggests that

    continuing effort to prove lock usages and dependencies are correct or

    A lock-class's behavior is constructed by its instances collectively:
    [all …]
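
Concretely, the class/instance split falls out of how the init macros plant keys: one static lock_class_key per mutex_init() call site means every object initialized there shares a single class, however many instances exist. A sketch (hypothetical struct):

    struct my_inode {                       /* hypothetical */
            struct mutex lock;
    };

    static void my_inode_init(struct my_inode *mi)
    {
            /* Thousands of my_inode instances, one lock class:
             * mutex_init() embeds one static key at this call site. */
            mutex_init(&mi->lock);
    }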
/Linux-v6.1/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/

locks.h

    /* SPDX-License-Identifier: GPL-2.0 */
    /* Only use one lock mechanism. Select which one. */
    static inline void lock_impl_lock(struct lock_impl *lock)
            BUG_ON(pthread_mutex_lock(&lock->mutex));
    static inline void lock_impl_unlock(struct lock_impl *lock)
            BUG_ON(pthread_mutex_unlock(&lock->mutex));
    static inline bool lock_impl_trylock(struct lock_impl *lock)
            int err = pthread_mutex_trylock(&lock->mutex);
    static inline void lock_impl_init(struct lock_impl *lock)
            pthread_mutex_init(&lock->mutex, NULL);
    [all …]
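
A usage sketch of this shim, relying only on the functions shown above (shared_lock is hypothetical; lock_impl_init() must run once before any worker):

    static struct lock_impl shared_lock;    /* hypothetical */

    static void setup(void)
    {
            lock_impl_init(&shared_lock);
    }

    static void worker(void)
    {
            lock_impl_lock(&shared_lock);
            /* ... critical section under the pthread-backed lock ... */
            lock_impl_unlock(&shared_lock);
    }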
/Linux-v6.1/arch/powerpc/include/asm/

simple_spinlock.h

    /* SPDX-License-Identifier: GPL-2.0-or-later */
    /*
     * Simple spin lock operations.
     *
     * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
     */
    #include <asm/ppc-opcode.h>
    #define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
    #define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
    static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
            return lock.slock == 0;
    static inline int arch_spin_is_locked(arch_spinlock_t *lock)
            return !arch_spin_value_unlocked(READ_ONCE(*lock));
    [all …]
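
arch_spin_is_locked() snapshots the lock word with READ_ONCE(), so a polling loop re-reads it on every iteration; e.g. a hypothetical wait-until-free helper:

    static void wait_until_unlocked(arch_spinlock_t *lock)
    {
            /* READ_ONCE() inside arch_spin_is_locked() forces a fresh
             * load of lock->slock on every pass. */
            while (arch_spin_is_locked(lock))
                    cpu_relax();
    }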
