
Searched full:lock (Results 1 – 25 of 6979) sorted by relevance


/Linux-v5.15/include/linux/
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
21 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
22 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
24 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock);
26 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock);
[all …]
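
These are the SMP slow-path entry points behind the public reader/writer lock wrappers. As a hedged illustration (not part of the search results), a caller would normally go through read_lock()/write_lock() and friends, which resolve to these _raw_* functions on SMP kernels; the lock and counter names below are invented for the sketch.

#include <linux/spinlock.h>	/* rwlock_t and the read_lock()/write_lock() API */

static DEFINE_RWLOCK(example_lock);	/* hypothetical lock protecting example_count */
static unsigned long example_count;

static unsigned long example_read(void)
{
        unsigned long v;

        read_lock(&example_lock);	/* ends up in _raw_read_lock() on SMP builds */
        v = example_count;
        read_unlock(&example_lock);
        return v;
}

static void example_increment(void)
{
        unsigned long flags;

        write_lock_irqsave(&example_lock, flags);	/* _raw_write_lock_irqsave() */
        example_count++;
        write_unlock_irqrestore(&example_lock, flags);
}
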
spinlock_api_up.h
19 #define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
24 * flags straight, to suppress compiler warnings of unused lock
27 #define ___LOCK(lock) \
28 do { __acquire(lock); (void)(lock); } while (0)
30 #define __LOCK(lock) \
31 do { preempt_disable(); ___LOCK(lock); } while (0)
33 #define __LOCK_BH(lock) \
34 do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
36 #define __LOCK_IRQ(lock) \
37 do { local_irq_disable(); __LOCK(lock); } while (0)
[all …]
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \
24 __rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
34 extern int do_raw_read_trylock(rwlock_t *lock);
35 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
36 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
[all …]
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock);
26 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock);
28 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30 __acquires(lock);
32 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33 __acquires(lock);
[all …]
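
These declarations back the spin_lock()/spin_unlock() family on SMP builds. As a hedged sketch (names invented, not taken from the results above), typical code uses the irqsave variant when the lock can also be taken from interrupt context:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock protecting demo_value */
static int demo_value;

static void demo_update(int v)
{
        unsigned long flags;

        /* spin_lock_irqsave() maps to _raw_spin_lock_irqsave() on SMP */
        spin_lock_irqsave(&demo_lock, flags);
        demo_value = v;
        spin_unlock_irqrestore(&demo_lock, flags);
}
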
spinlock_rt.h
10 extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
13 static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
23 rt_mutex_base_init(&(slock)->lock); \
31 rt_mutex_base_init(&(slock)->lock); \
35 extern void rt_spin_lock(spinlock_t *lock);
36 extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
37 extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
38 extern void rt_spin_unlock(spinlock_t *lock);
39 extern void rt_spin_lock_unlock(spinlock_t *lock);
40 extern int rt_spin_trylock_bh(spinlock_t *lock);
[all …]
spinlock.h
71 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
100 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
103 # define raw_spin_lock_init(lock) \
107 __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
111 # define raw_spin_lock_init(lock) \
112 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
115 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
118 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
120 #define raw_spin_is_contended(lock) (((void)(lock), 0))
125 * between program-order earlier lock acquisitions and program-order later
[all …]
local_lock.h
8 * local_lock_init - Runtime initialize a lock instance
10 #define local_lock_init(lock) __local_lock_init(lock)
13 * local_lock - Acquire a per CPU local lock
14 * @lock: The lock variable
16 #define local_lock(lock) __local_lock(lock)
19 * local_lock_irq - Acquire a per CPU local lock and disable interrupts
20 * @lock: The lock variable
22 #define local_lock_irq(lock) __local_lock_irq(lock)
25 * local_lock_irqsave - Acquire a per CPU local lock, save and disable
27 * @lock: The lock variable
[all …]
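
local_lock protects per-CPU data against preemption (and, with the _irq variants, interrupts) on the local CPU only. A hedged usage sketch, with the structure and field names invented, assuming the common DEFINE_PER_CPU pattern:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct demo_pcpu {
        local_lock_t lock;	/* protects hits on this CPU only */
        unsigned long hits;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void demo_account_hit(void)
{
        local_lock(&demo_pcpu.lock);	/* disables preemption on !PREEMPT_RT */
        this_cpu_inc(demo_pcpu.hits);
        local_unlock(&demo_pcpu.lock);
}
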
spinlock_up.h
29 static inline void arch_spin_lock(arch_spinlock_t *lock)
31 lock->slock = 0;
35 static inline int arch_spin_trylock(arch_spinlock_t *lock)
37 char oldval = lock->slock;
39 lock->slock = 0;
45 static inline void arch_spin_unlock(arch_spinlock_t *lock)
48 lock->slock = 1;
54 #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
55 #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
56 #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
[all …]
/Linux-v5.15/kernel/locking/
spinlock_debug.c
16 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
21 * Make sure we are not reinitializing a held lock:
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
24 lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
27 lock->magic = SPINLOCK_MAGIC;
28 lock->owner = SPINLOCK_OWNER_INIT;
29 lock->owner_cpu = -1;
35 void __rwlock_init(rwlock_t *lock, const char *name,
40 * Make sure we are not reinitializing a held lock:
[all …]
rtmutex_api.c
17 * Debug aware fast / slowpath lock,trylock,unlock
22 static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
29 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
30 ret = __rt_mutex_lock(&lock->rtmutex, state);
32 mutex_release(&lock->dep_map, _RET_IP_);
44 * rt_mutex_lock_nested - lock a rt_mutex
46 * @lock: the rt_mutex to be locked
49 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
51 __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
58 * rt_mutex_lock - lock a rt_mutex
[all …]
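
__rt_mutex_lock_common() is the shared debug-aware path behind rt_mutex_lock(), rt_mutex_lock_nested() and the interruptible/killable variants. A hedged caller sketch (lock name invented):

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(demo_rt_lock);	/* hypothetical priority-inheriting lock */

static void demo_pi_section(void)
{
        rt_mutex_lock(&demo_rt_lock);	/* __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0) */
        /* ... work that must not be delayed by priority inversion ... */
        rt_mutex_unlock(&demo_rt_lock);
}
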
mutex.c
43 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
45 atomic_long_set(&lock->owner, 0);
46 raw_spin_lock_init(&lock->wait_lock);
47 INIT_LIST_HEAD(&lock->wait_list);
49 osq_lock_init(&lock->osq);
52 debug_mutex_init(lock, name, key);
57 * @owner: contains: 'struct task_struct *' to the current lock owner,
62 * Bit1 indicates unlock needs to hand the lock to the top-waiter
76 static inline struct task_struct *__mutex_owner(struct mutex *lock)
78 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
[all …]
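
__mutex_init() is the runtime initializer behind mutex_init(). A hedged usage sketch of the public API (the mutex and list names are invented for the example):

#include <linux/mutex.h>
#include <linux/list.h>

static DEFINE_MUTEX(demo_mutex);	/* statically initialized; mutex_init() is the runtime form */
static LIST_HEAD(demo_list);

static int demo_add(struct list_head *entry)
{
        int ret;

        ret = mutex_lock_interruptible(&demo_mutex);	/* sleeps; may return -EINTR */
        if (ret)
                return ret;
        list_add_tail(entry, &demo_list);
        mutex_unlock(&demo_mutex);
        return 0;
}
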
spinlock.c
35 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
46 * Some architectures can relax in favour of the CPU owning the lock.
63 * This could be a long-held lock. We both prepare to spin for a long
65 * towards that other CPU that it should break the lock ASAP.
68 void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
72 if (likely(do_raw_##op##_trylock(lock))) \
76 arch_##op##_relax(&lock->raw_lock); \
80 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
87 if (likely(do_raw_##op##_trylock(lock))) \
92 arch_##op##_relax(&lock->raw_lock); \
[all …]
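
The macro shown here stamps out the generic out-of-line lock loops for each lock type. A hedged, simplified expansion of the spin variant (the real macro also generates the _irqsave, _irq and _bh forms):

void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
        for (;;) {
                preempt_disable();
                if (likely(do_raw_spin_trylock(lock)))
                        break;			/* got it: return with preemption disabled */
                preempt_enable();		/* back off with preemption re-enabled */

                arch_spin_relax(&lock->raw_lock);	/* typically cpu_relax() */
        }
}
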
rtmutex.c
34 struct rt_mutex *lock,
40 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
45 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
50 static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
64 * lock->owner state tracking:
66 * lock->owner holds the task_struct pointer of the owner. Bit 0
67 * is used to keep track of the "lock has waiters" state.
70 * NULL 0 lock is free (fast acquire possible)
71 * NULL 1 lock is free and has waiters and the top waiter
72 * is going to take the lock*
[all …]
ww_mutex.h
9 __ww_waiter_first(struct mutex *lock)
13 w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
14 if (list_entry_is_head(w, &lock->wait_list, list))
21 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
24 if (list_entry_is_head(w, &lock->wait_list, list))
31 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
34 if (list_entry_is_head(w, &lock->wait_list, list))
41 __ww_waiter_last(struct mutex *lock)
45 w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
46 if (list_entry_is_head(w, &lock->wait_list, list))
[all …]
/Linux-v5.15/drivers/gpu/drm/
drm_lock.c
50 * Take the heavyweight lock.
52 * \param lock lock pointer.
54 * \return one if the lock is held, or zero otherwise.
56 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
63 volatile unsigned int *lock = &lock_data->hw_lock->lock;
67 old = *lock;
75 prev = cmpxchg(lock, old, new);
82 DRM_ERROR("%d holds heavyweight lock\n",
90 /* Have lock */
97 * This takes a lock forcibly and hands it to context. Should ONLY be used
[all …]
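
drm_lock_take() marks the lock word as held by a context using a cmpxchg() retry loop. A hedged, generic sketch of that pattern; the DEMO_LOCK_HELD bit and function name are invented and do not reflect the real DRM hardware-lock encoding:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>

#define DEMO_LOCK_HELD	0x80000000u	/* hypothetical "held" bit for the sketch */

static bool demo_lock_take(unsigned int *lock_word, unsigned int ctx)
{
        unsigned int old, new, prev;

        do {
                old = READ_ONCE(*lock_word);
                if (old & DEMO_LOCK_HELD)
                        return false;		/* somebody else already holds it */
                new = DEMO_LOCK_HELD | ctx;	/* claim it for this context */
                prev = cmpxchg(lock_word, old, new);
        } while (prev != old);			/* lost a race: re-read and retry */

        return true;
}
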
/Linux-v5.15/fs/ocfs2/dlm/
dlmast.c
35 struct dlm_lock *lock);
36 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
39 * lock level will obsolete a pending bast.
40 * For example, if dlm_thread queued a bast for an EX lock that
42 * lock owner downconverted to NL, the bast is now obsolete.
44 * This is needed because the lock and convert paths can queue
47 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
50 assert_spin_locked(&lock->spinlock);
52 if (lock->ml.highest_blocked == LKM_IVMODE)
54 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
[all …]
dlmlock.c
5 * underlying calls for lock creation
45 struct dlm_lock *lock, int flags);
49 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
66 /* Tell us whether we can grant a new lock request.
71 * returns: 1 if the lock can be granted, 0 otherwise.
74 struct dlm_lock *lock)
79 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
84 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
87 lock->ml.type))
94 /* performs lock creation at the lockres master site
[all …]
dlmconvert.c
5 * underlying calls for lock conversion
38 * only one that holds a lock on exit (res->spinlock).
43 struct dlm_lock *lock, int flags,
48 struct dlm_lock *lock, int flags, int type);
61 struct dlm_lock *lock, int flags, int type)
72 status = __dlmconvert_master(dlm, res, lock, flags, type,
83 dlm_queue_ast(dlm, lock);
93 /* performs lock conversion at the lockres master site
96 * taken: takes and drops lock->spinlock
99 * call_ast: whether ast should be called for this lock
[all …]
/Linux-v5.15/include/asm-generic/
qrwlock.h
3 * Queue read/write lock
24 #define _QW_LOCKED 0x0ff /* A writer holds the lock */
32 extern void queued_read_lock_slowpath(struct qrwlock *lock);
33 extern void queued_write_lock_slowpath(struct qrwlock *lock);
36 * queued_read_trylock - try to acquire read lock of a queue rwlock
37 * @lock : Pointer to queue rwlock structure
38 * Return: 1 if lock acquired, 0 if failed
40 static inline int queued_read_trylock(struct qrwlock *lock)
44 cnts = atomic_read(&lock->cnts);
46 cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
[all …]
/Linux-v5.15/drivers/md/persistent-data/
dm-block-manager.c
31 * trace is also emitted for the previous lock acquisition.
44 spinlock_t lock;
60 static unsigned __find_holder(struct block_lock *lock,
66 if (lock->holders[i] == task)
73 /* call this *after* you increment lock->count */
74 static void __add_holder(struct block_lock *lock, struct task_struct *task)
76 unsigned h = __find_holder(lock, NULL);
82 lock->holders[h] = task;
85 t = lock->traces + h;
90 /* call this *before* you decrement lock->count */
[all …]
/Linux-v5.15/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/
locks.h
20 /* Only use one lock mechanism. Select which one. */
26 static inline void lock_impl_lock(struct lock_impl *lock)
28 BUG_ON(pthread_mutex_lock(&lock->mutex));
31 static inline void lock_impl_unlock(struct lock_impl *lock)
33 BUG_ON(pthread_mutex_unlock(&lock->mutex));
36 static inline bool lock_impl_trylock(struct lock_impl *lock)
38 int err = pthread_mutex_trylock(&lock->mutex);
47 static inline void lock_impl_init(struct lock_impl *lock)
49 pthread_mutex_init(&lock->mutex, NULL);
55 /* Spinlock that assumes that it always gets the lock immediately. */
[all …]
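
These lock_impl_* helpers wrap pthread mutexes so the SRCU model can be checked with CBMC. A hedged usage sketch, assuming struct lock_impl and the wrappers are visible via locks.h in the test harness (the counter is invented):

#include "locks.h"	/* struct lock_impl and the lock_impl_* wrappers */

static struct lock_impl counter_lock;
static int counter;

static void counter_setup(void)
{
        lock_impl_init(&counter_lock);	/* pthread_mutex_init() underneath */
}

static int counter_bump(void)
{
        int v;

        lock_impl_lock(&counter_lock);
        v = ++counter;
        lock_impl_unlock(&counter_lock);
        return v;
}
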
/Linux-v5.15/fs/btrfs/
locking.c
26 * - try-lock semantics for readers and writers
33 * __btrfs_tree_read_lock - lock extent buffer for read
37 * This takes the read lock on the extent buffer, using the specified nesting
47 down_read_nested(&eb->lock, nest);
58 * Try-lock for read.
64 if (down_read_trylock(&eb->lock)) {
73 * Try-lock for write.
79 if (down_write_trylock(&eb->lock)) {
88 * Release read lock.
94 up_read(&eb->lock);
[all …]
/Linux-v5.15/arch/powerpc/include/asm/
simple_spinlock.h
6 * Simple spin lock operations.
34 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
36 return lock.slock == 0;
39 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
41 return !arch_spin_value_unlocked(READ_ONCE(*lock));
45 * This returns the old value in the lock, so we succeeded
46 * in getting the lock if the return value is 0.
48 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
62 : "r" (token), "r" (&lock->slock)
68 static inline int arch_spin_trylock(arch_spinlock_t *lock)
[all …]
/Linux-v5.15/Documentation/locking/
robust-futex-ABI.rst
56 pointer to a single linked list of 'lock entries', one per lock,
58 to itself, 'head'. The last 'lock entry' points back to the 'head'.
61 address of the associated 'lock entry', plus or minus, of what will
62 be called the 'lock word', from that 'lock entry'. The 'lock word'
63 is always a 32 bit word, unlike the other words above. The 'lock
65 of the thread holding the lock in the bottom 30 bits. See further
69 the address of the 'lock entry', during list insertion and removal,
73 Each 'lock entry' on the single linked list starting at 'head' consists
74 of just a single word, pointing to the next 'lock entry', or back to
75 'head' if there are no more entries. In addition, nearby to each 'lock
[all …]
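
The 'lock word' layout described here is the standard robust-futex encoding: the owner's thread ID in the bottom 30 bits plus waiter and owner-died flag bits. A hedged user-space sketch that decodes such a word using the UAPI constants (the example value is made up):

#include <stdint.h>
#include <stdio.h>
#include <linux/futex.h>	/* FUTEX_TID_MASK, FUTEX_WAITERS, FUTEX_OWNER_DIED */

static void decode_lock_word(uint32_t word)
{
        printf("owner tid  : %u\n", word & FUTEX_TID_MASK);	/* bottom 30 bits */
        printf("waiters    : %s\n", (word & FUTEX_WAITERS) ? "yes" : "no");
        printf("owner died : %s\n", (word & FUTEX_OWNER_DIED) ? "yes" : "no");
}

int main(void)
{
        decode_lock_word(0x80000457u);	/* waiters bit set, owner tid 1111 (example) */
        return 0;
}
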
lockdep-design.rst
8 Lock-class
15 tens of thousands of) instantiations. For example a lock in the inode
17 lock class.
19 The validator tracks the 'usage state' of lock-classes, and it tracks
20 the dependencies between different lock-classes. Lock usage indicates
21 how a lock is used with regard to its IRQ contexts, while lock
22 dependency can be understood as lock order, where L1 -> L2 suggests that
26 continuing effort to prove lock usages and dependencies are correct or
29 A lock-class's behavior is constructed by its instances collectively:
30 when the first instance of a lock-class is used after bootup the class
[all …]
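
The key point of this document is that lockdep validates lock-classes, not individual lock instances. As a hedged illustration (structure and function names invented): every object below gets its own spinlock, but because they are all initialized from a single spin_lock_init() call site, which plants one static lock_class_key there (see the spinlock.h excerpt above), lockdep folds them into a single class.

#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_obj {
        spinlock_t lock;
        int state;
};

static struct demo_obj *demo_obj_alloc(gfp_t gfp)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), gfp);

        if (obj)
                spin_lock_init(&obj->lock);	/* all instances share this call site's class */
        return obj;
}
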
