/Linux-v4.19/drivers/gpu/drm/ttm/

D | ttm_lock.c |
   46  void ttm_lock_init(struct ttm_lock *lock)
   48          spin_lock_init(&lock->lock);
   49          init_waitqueue_head(&lock->queue);
   50          lock->rw = 0;
   51          lock->flags = 0;
   52          lock->kill_takers = false;
   53          lock->signal = SIGKILL;
   57  void ttm_read_unlock(struct ttm_lock *lock)
   59          spin_lock(&lock->lock);
   60          if (--lock->rw == 0)
       [all …]
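
The matches above show only the init and read-unlock paths. As an illustrative sketch of how the fields fit together (this is not the real TTM code; the struct and helper names are hypothetical stand-ins): lock->rw counts active readers, a negative value would mark a writer, and lock->queue is the waitqueue that the unlock side's wake-up pairs with.

    /* Illustrative sketch only, not drivers/gpu/drm/ttm code. */
    #include <linux/types.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct ttm_lock_sketch {
            spinlock_t lock;            /* protects rw */
            wait_queue_head_t queue;    /* blocked lockers sleep here */
            int rw;                     /* > 0: readers, < 0: writer */
    };

    static bool __read_trylock_sketch(struct ttm_lock_sketch *l)
    {
            bool granted;

            spin_lock(&l->lock);
            granted = (l->rw >= 0);     /* no writer active */
            if (granted)
                    l->rw++;
            spin_unlock(&l->lock);
            return granted;
    }

    static void read_lock_sketch(struct ttm_lock_sketch *l)
    {
            /* sleep until granted; the unlock side wakes l->queue,
             * which re-evaluates the condition */
            wait_event(l->queue, __read_trylock_sketch(l));
    }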

/Linux-v4.19/include/linux/

D | spinlock_api_up.h |
   19  #define assert_raw_spin_locked(lock)    do { (void)(lock); } while (0)
   27  #define ___LOCK(lock) \
   28          do { __acquire(lock); (void)(lock); } while (0)
   30  #define __LOCK(lock) \
   31          do { preempt_disable(); ___LOCK(lock); } while (0)
   33  #define __LOCK_BH(lock) \
   34          do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
   36  #define __LOCK_IRQ(lock) \
   37          do { local_irq_disable(); __LOCK(lock); } while (0)
   39  #define __LOCK_IRQSAVE(lock, flags) \
       [all …]
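
On a uniprocessor build (CONFIG_SMP=n) there is nothing to spin on, so these macros reduce locking to preemption control; __acquire() and the (void)(lock) cast exist only to keep sparse annotations and "unused variable" warnings happy. A minimal caller-side sketch of what that means in practice (the lock and counter names are hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static int shared_counter;

    static void bump_counter(void)
    {
            spin_lock(&demo_lock);      /* UP: preempt_disable() + __acquire() */
            shared_counter++;
            spin_unlock(&demo_lock);    /* UP: __release() + preempt_enable() */
    }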

D | rwlock_api_smp.h |
   18  void __lockfunc _raw_read_lock(rwlock_t *lock)          __acquires(lock);
   19  void __lockfunc _raw_write_lock(rwlock_t *lock)         __acquires(lock);
   20  void __lockfunc _raw_read_lock_bh(rwlock_t *lock)       __acquires(lock);
   21  void __lockfunc _raw_write_lock_bh(rwlock_t *lock)      __acquires(lock);
   22  void __lockfunc _raw_read_lock_irq(rwlock_t *lock)      __acquires(lock);
   23  void __lockfunc _raw_write_lock_irq(rwlock_t *lock)     __acquires(lock);
   24  unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
   25                          __acquires(lock);
   26  unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
   27                          __acquires(lock);
       [all …]
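
These are the out-of-line SMP implementations behind the public read_lock()/write_lock() family. A hedged usage sketch of the wrappers that end up calling them (the lock and the guarded data are hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(cfg_rwlock);
    static int cfg_value;

    static int read_cfg(void)
    {
            int v;

            read_lock(&cfg_rwlock);         /* many readers may hold this */
            v = cfg_value;
            read_unlock(&cfg_rwlock);
            return v;
    }

    static void write_cfg(int v)
    {
            unsigned long flags;

            /* exclusive writer, also safe against readers in IRQ context */
            write_lock_irqsave(&cfg_rwlock, flags);
            cfg_value = v;
            write_unlock_irqrestore(&cfg_rwlock, flags);
    }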

D | spinlock.h |
   94  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   96  # define raw_spin_lock_init(lock) \
  100          __raw_spin_lock_init((lock), #lock, &__key); \
  104  # define raw_spin_lock_init(lock) \
  105          do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  108  #define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)
  111  #define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
  113  #define raw_spin_is_contended(lock)     (((void)(lock), 0))
  172  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  173  #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
       [all …]
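
raw_spin_lock_init() is a macro rather than a function so that, under CONFIG_DEBUG_LOCK_ALLOC, every initialization site gets its own static struct lock_class_key for lockdep classification (the first definition above); without lock debugging it degenerates to a plain structure assignment (the second). A minimal sketch of dynamic initialization, with a hypothetical structure:

    #include <linux/spinlock.h>

    struct demo_dev {
            spinlock_t lock;
            int state;
    };

    static void demo_dev_setup(struct demo_dev *dev)
    {
            spin_lock_init(&dev->lock); /* expands to raw_spin_lock_init() */
            dev->state = 0;
    }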

D | rwlock.h |
   18  extern void __rwlock_init(rwlock_t *lock, const char *name,
   20  # define rwlock_init(lock) \
   24          __rwlock_init((lock), #lock, &__key); \
   27  # define rwlock_init(lock) \
   28          do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
   32  extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
   33  #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
   34  extern int do_raw_read_trylock(rwlock_t *lock);
   35  extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
   36  extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
       [all …]

D | spinlock_api_smp.h |
   22  void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)    __acquires(lock);
   23  void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
   24                          __acquires(lock);
   26  _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
   27                          __acquires(lock);
   28  void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
   29  void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
   30                          __acquires(lock);
   32  unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
   33                          __acquires(lock);
       [all …]
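
Note that the _irqsave variants return the saved interrupt state instead of taking a flags pointer; the spin_lock_irqsave() wrapper macro assigns that return value into the caller's flags variable. A usage sketch with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(fifo_lock);
    static unsigned int fifo_head;

    /* callable from task and hard-IRQ context alike */
    static void fifo_push(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&fifo_lock, flags);   /* -> _raw_spin_lock_irqsave() */
            fifo_head++;
            spin_unlock_irqrestore(&fifo_lock, flags);
    }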

D | spinlock_up.h |
   29  static inline void arch_spin_lock(arch_spinlock_t *lock)
   31          lock->slock = 0;
   35  static inline int arch_spin_trylock(arch_spinlock_t *lock)
   37          char oldval = lock->slock;
   39          lock->slock = 0;
   45  static inline void arch_spin_unlock(arch_spinlock_t *lock)
   48          lock->slock = 1;
   54  #define arch_read_lock(lock)            do { barrier(); (void)(lock); } while (0)
   55  #define arch_write_lock(lock)           do { barrier(); (void)(lock); } while (0)
   56  #define arch_read_trylock(lock)         ({ barrier(); (void)(lock); 1; })
       [all …]

D | mutex.h |
   73  static inline struct task_struct *__mutex_owner(struct mutex *lock)
   75          return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
   96  extern void mutex_destroy(struct mutex *lock);
  102  static inline void mutex_destroy(struct mutex *lock) {}
  138  extern void __mutex_init(struct mutex *lock, const char *name,
  147  static inline bool mutex_is_locked(struct mutex *lock)
  149          return __mutex_owner(lock) != NULL;
  157  extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  158  extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
  160  extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
       [all …]
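
__mutex_owner() masks off the low three bits because the mutex packs flag bits (waiters, handoff) into the owner task_struct pointer, which is always at least 8-byte aligned; mutex_is_locked() is then just an owner test. A hedged usage sketch of the public sleeping-lock API (names hypothetical):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(cfg_mutex);
    static int cfg_value;

    static int set_cfg(int v)
    {
            int ret;

            ret = mutex_lock_interruptible(&cfg_mutex); /* may sleep */
            if (ret)
                    return ret;     /* interrupted by a signal */
            cfg_value = v;
            mutex_unlock(&cfg_mutex);
            return 0;
    }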

/Linux-v4.19/kernel/locking/

D | spinlock_debug.c |
   16  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   23          debug_check_no_locks_freed((void *)lock, sizeof(*lock));
   24          lockdep_init_map(&lock->dep_map, name, key, 0);
   26          lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
   27          lock->magic = SPINLOCK_MAGIC;
   28          lock->owner = SPINLOCK_OWNER_INIT;
   29          lock->owner_cpu = -1;
   34  void __rwlock_init(rwlock_t *lock, const char *name,
   41          debug_check_no_locks_freed((void *)lock, sizeof(*lock));
   42          lockdep_init_map(&lock->dep_map, name, key, 0);
       [all …]

D | rtmutex.c |
   52  rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
   56          if (rt_mutex_has_waiters(lock))
   59          lock->owner = (struct task_struct *)val;
   62  static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
   64          lock->owner = (struct task_struct *)
   65                  ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
   68  static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
   70          unsigned long owner, *p = (unsigned long *) &lock->owner;
   72          if (rt_mutex_has_waiters(lock))
  152  static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
       [all …]
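
rtmutex plays a pointer-tagging game: RT_MUTEX_HAS_WAITERS lives in the low bit of lock->owner, which is why clear_rt_mutex_waiters() masks it off before the word is used as a task_struct pointer again. A standalone sketch of that encoding (the flag value is assumed to be bit 0, as the mask above implies):

    #include <stdint.h>

    #define HAS_WAITERS 1UL     /* assumed flag, low bit of an aligned pointer */

    static inline uintptr_t pack_owner(void *task, int has_waiters)
    {
            return (uintptr_t)task | (has_waiters ? HAS_WAITERS : 0);
    }

    static inline void *owner_task(uintptr_t word)
    {
            return (void *)(word & ~HAS_WAITERS);   /* strip the flag bit */
    }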

D | mutex.c |
   39  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
   41          atomic_long_set(&lock->owner, 0);
   42          spin_lock_init(&lock->wait_lock);
   43          INIT_LIST_HEAD(&lock->wait_list);
   45          osq_lock_init(&lock->osq);
   48          debug_mutex_init(lock, name, key);
   80  static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
   84          owner = atomic_long_read(&lock->owner);
  110          old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
  123  static inline bool __mutex_trylock(struct mutex *lock)
       [all …]
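
The fast path suggested by __mutex_trylock_or_owner() is a single acquire-ordered cmpxchg on the owner word: swing it from 0 to current, or learn who owns it. A simplified sketch that ignores the low flag bits folded into the word (helper name hypothetical):

    #include <linux/atomic.h>
    #include <linux/sched.h>

    static inline bool demo_mutex_trylock_fast(atomic_long_t *owner)
    {
            unsigned long curr = (unsigned long)current;

            /* acquire ordering pairs with the previous owner's release */
            return atomic_long_cmpxchg_acquire(owner, 0UL, curr) == 0UL;
    }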

D | spinlock.c |
   61  void __lockfunc __raw_##op##_lock(locktype##_t *lock)                  \
   65                  if (likely(do_raw_##op##_trylock(lock)))               \
   69                  arch_##op##_relax(&lock->raw_lock);                    \
   73  unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
   80                  if (likely(do_raw_##op##_trylock(lock)))               \
   85                  arch_##op##_relax(&lock->raw_lock);                    \
   91  void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)              \
   93          _raw_##op##_lock_irqsave(lock);                                \
   96  void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)               \
  105          flags = _raw_##op##_lock_irqsave(lock);                        \
       [all …]
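
These fragments come from the BUILD_LOCK_OPS() template, which stamps out one function body per (op, locktype) pair. Expanded for op = spin, the first template reads roughly as follows (a sketch of the generated code, not a verbatim expansion):

    void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
    {
            for (;;) {
                    preempt_disable();
                    if (likely(do_raw_spin_trylock(lock)))
                            break;                  /* acquired */
                    preempt_enable();               /* let the owner run */
                    arch_spin_relax(&lock->raw_lock);   /* e.g. cpu_relax() */
            }
    }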

D | qspinlock_paravirt.h |
   83  static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
   90                  int val = atomic_read(&lock->val);
   93                      (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
  111  static __always_inline void set_pending(struct qspinlock *lock)
  113          WRITE_ONCE(lock->pending, 1);
  121  static __always_inline int trylock_clear_pending(struct qspinlock *lock)
  123          return !READ_ONCE(lock->locked) &&
  124                 (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
  128  static __always_inline void set_pending(struct qspinlock *lock)
  130          atomic_or(_Q_PENDING_VAL, &lock->val);
       [all …]

/Linux-v4.19/arch/alpha/include/asm/

D | spinlock.h |
   17  #define arch_spin_is_locked(x)  ((x)->lock != 0)
   19  static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
   21          return lock.lock == 0;
   24  static inline void arch_spin_unlock(arch_spinlock_t * lock)
   27          lock->lock = 0;
   30  static inline void arch_spin_lock(arch_spinlock_t * lock)
   46          : "=&r" (tmp), "=m" (lock->lock)
   47          : "m"(lock->lock) : "memory");
   50  static inline int arch_spin_trylock(arch_spinlock_t *lock)
   52          return !test_and_set_bit(0, &lock->lock);
       [all …]
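
Alpha's trylock is an atomic test-and-set of bit 0: test_and_set_bit() returns the previous bit value, so "previously clear" means the caller now owns the lock. A userspace C11 analogy of the same idea (a sketch, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag demo_lock = ATOMIC_FLAG_INIT;

    static bool demo_trylock(void)
    {
            /* returns the old state: false means it was free and is now ours */
            return !atomic_flag_test_and_set_explicit(&demo_lock,
                                                      memory_order_acquire);
    }

    static void demo_unlock(void)
    {
            atomic_flag_clear_explicit(&demo_lock, memory_order_release);
    }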

/Linux-v4.19/arch/hexagon/include/asm/

D | spinlock.h |
   42  static inline void arch_read_lock(arch_rwlock_t *lock)
   51          : "r" (&lock->lock)
   57  static inline void arch_read_unlock(arch_rwlock_t *lock)
   65          : "r" (&lock->lock)
   72  static inline int arch_read_trylock(arch_rwlock_t *lock)
   83          : "r" (&lock->lock)
   90  static inline void arch_write_lock(arch_rwlock_t *lock)
   99          : "r" (&lock->lock)
  105  static inline int arch_write_trylock(arch_rwlock_t *lock)
  116          : "r" (&lock->lock)
       [all …]

/Linux-v4.19/fs/ocfs2/dlm/

D | dlmast.c |
   52                           struct dlm_lock *lock);
   53  static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
   64  static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
   67          assert_spin_locked(&lock->spinlock);
   69          if (lock->ml.highest_blocked == LKM_IVMODE)
   71          BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
   73          if (lock->bast_pending &&
   74              list_empty(&lock->bast_list))
   78          if (lock->ml.type == LKM_EXMODE)
   81          else if (lock->ml.type == LKM_NLMODE)
       [all …]

D | dlmlock.c |
   62                             struct dlm_lock *lock, int flags);
   66  static void dlm_lock_detach_lockres(struct dlm_lock *lock);
   91                                    struct dlm_lock *lock)
   96                  if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
  101                  if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
  104                                                  lock->ml.type))
  120                            struct dlm_lock *lock, int flags)
  125          mlog(0, "type=%d\n", lock->ml.type);
  132              lock->ml.node != dlm->node_num) {
  141          if (dlm_can_grant_new_lock(res, lock)) {
       [all …]

/Linux-v4.19/arch/ia64/include/asm/

D | spinlock.h |
   22  #define arch_spin_lock_init(x)                  ((x)->lock = 0)
   43  static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
   45          int *p = (int *)&lock->lock, ticket, serve;
   63  static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
   65          int tmp = READ_ONCE(lock->lock);
   68          return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
   72  static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
   74          unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
   80  static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
   82          long tmp = READ_ONCE(lock->lock);
       [all …]
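
ia64 uses a ticket lock: one half of the lock word hands out tickets and the adjacent unsigned short (the "+ 1" in __ticket_spin_unlock() above) is the "now serving" counter. A generic C11 sketch of the discipline, not the ia64 word layout:

    #include <stdatomic.h>

    struct ticket_lock {
            atomic_uint next;   /* next ticket to hand out */
            atomic_uint serve;  /* ticket currently being served */
    };

    static void ticket_lock(struct ticket_lock *l)
    {
            unsigned int t = atomic_fetch_add_explicit(&l->next, 1,
                                                       memory_order_relaxed);
            while (atomic_load_explicit(&l->serve, memory_order_acquire) != t)
                    ;           /* spin; real code would insert a pause hint */
    }

    static void ticket_unlock(struct ticket_lock *l)
    {
            atomic_fetch_add_explicit(&l->serve, 1, memory_order_release);
    }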

/Linux-v4.19/drivers/md/persistent-data/

D | dm-block-manager.c |
   41          spinlock_t lock;
   58  static unsigned __find_holder(struct block_lock *lock,
   64                  if (lock->holders[i] == task)
   72  static void __add_holder(struct block_lock *lock, struct task_struct *task)
   74          unsigned h = __find_holder(lock, NULL);
   80          lock->holders[h] = task;
   83          t = lock->traces + h;
   86          t->entries = lock->entries[h];
   93  static void __del_holder(struct block_lock *lock, struct task_struct *task)
   95          unsigned h = __find_holder(lock, task);
       [all …]

/Linux-v4.19/arch/riscv/include/asm/

D | spinlock.h |
   28  #define arch_spin_is_locked(x)  (READ_ONCE((x)->lock) != 0)
   30  static inline void arch_spin_unlock(arch_spinlock_t *lock)
   32          smp_store_release(&lock->lock, 0);
   35  static inline int arch_spin_trylock(arch_spinlock_t *lock)
   42                  : "=r" (busy), "+A" (lock->lock)
   49  static inline void arch_spin_lock(arch_spinlock_t *lock)
   52                  if (arch_spin_is_locked(lock))
   55                  if (arch_spin_trylock(lock))
   62  static inline void arch_read_lock(arch_rwlock_t *lock)
   73                  : "+A" (lock->lock), "=&r" (tmp)
       [all …]

/Linux-v4.19/drivers/gpu/drm/

D | drm_lock.c |
   59          volatile unsigned int *lock = &lock_data->hw_lock->lock;
   63                  old = *lock;
   71                  prev = cmpxchg(lock, old, new);
  108          volatile unsigned int *lock = &lock_data->hw_lock->lock;
  112                  old = *lock;
  114                  prev = cmpxchg(lock, old, new);
  123          volatile unsigned int *lock = &lock_data->hw_lock->lock;
  135                  old = *lock;
  137                  prev = cmpxchg(lock, old, new);
  164          struct drm_lock *lock = data;
       [all …]
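
All three call sites share one pattern: read the old lock word, compute a new value from it, and cmpxchg() it back, retrying whenever another CPU changed the word in between (prev != old). A generic C11 sketch of that loop:

    #include <stdatomic.h>

    static void set_flag_atomically(atomic_uint *word, unsigned int flag)
    {
            unsigned int old, val;

            do {
                    old = atomic_load_explicit(word, memory_order_relaxed);
                    val = old | flag;   /* any pure function of old works */
            } while (!atomic_compare_exchange_weak_explicit(word, &old, val,
                                    memory_order_acq_rel,
                                    memory_order_relaxed));
    }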

/Linux-v4.19/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/

D | locks.h |
   26  static inline void lock_impl_lock(struct lock_impl *lock)
   28          BUG_ON(pthread_mutex_lock(&lock->mutex));
   31  static inline void lock_impl_unlock(struct lock_impl *lock)
   33          BUG_ON(pthread_mutex_unlock(&lock->mutex));
   36  static inline bool lock_impl_trylock(struct lock_impl *lock)
   38          int err = pthread_mutex_trylock(&lock->mutex);
   47  static inline void lock_impl_init(struct lock_impl *lock)
   49          pthread_mutex_init(&lock->mutex, NULL);
   61  static inline bool lock_impl_trylock(struct lock_impl *lock)
   65          return __sync_bool_compare_and_swap(&lock->locked, false, true);
       [all …]
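
This header gives the CBMC model two interchangeable lock backends: a real pthread mutex, and (for model checking) a plain flag flipped with a compare-and-swap. A caller-side sketch, assuming this locks.h is on the include path:

    #include "locks.h"

    static struct lock_impl demo_lock;

    static void demo(void)
    {
            lock_impl_init(&demo_lock);
            if (lock_impl_trylock(&demo_lock)) {
                    /* ... critical section ... */
                    lock_impl_unlock(&demo_lock);
            }
    }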

/Linux-v4.19/drivers/acpi/acpica/

D | utlock.c |
   28  acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock)
   32          lock->num_readers = 0;
   33          status = acpi_os_create_mutex(&lock->reader_mutex);
   38          status = acpi_os_create_mutex(&lock->writer_mutex);
   42  void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
   45          acpi_os_delete_mutex(lock->reader_mutex);
   46          acpi_os_delete_mutex(lock->writer_mutex);
   48          lock->num_readers = 0;
   49          lock->reader_mutex = NULL;
   50          lock->writer_mutex = NULL;
       [all …]
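
Two OS mutexes plus a reader count implement the classic "first reader locks the writers out, last reader lets them back in" scheme. A pthread-flavoured sketch of the acquire/release sides; note that a strict pthread mutex must be unlocked by the thread that locked it, so this pattern really wants a binary semaphore, which is effectively what the acpi_os mutexes provide:

    #include <pthread.h>

    struct demo_rw_lock {
            pthread_mutex_t reader_mutex;   /* serializes num_readers */
            pthread_mutex_t writer_mutex;   /* held by a writer, or by the
                                               group of readers as a whole */
            int num_readers;
    };

    static void demo_read_lock(struct demo_rw_lock *l)
    {
            pthread_mutex_lock(&l->reader_mutex);
            if (++l->num_readers == 1)              /* first reader in */
                    pthread_mutex_lock(&l->writer_mutex);
            pthread_mutex_unlock(&l->reader_mutex);
    }

    static void demo_read_unlock(struct demo_rw_lock *l)
    {
            pthread_mutex_lock(&l->reader_mutex);
            if (--l->num_readers == 0)              /* last reader out */
                    pthread_mutex_unlock(&l->writer_mutex);
            pthread_mutex_unlock(&l->reader_mutex);
    }

    static void demo_write_lock(struct demo_rw_lock *l)
    {
            pthread_mutex_lock(&l->writer_mutex);   /* excludes all readers */
    }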

/Linux-v4.19/arch/powerpc/include/asm/

D | spinlock.h |
   65  static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
   67          return lock.slock == 0;
   70  static inline int arch_spin_is_locked(arch_spinlock_t *lock)
   73          return !arch_spin_value_unlocked(*lock);
   80  static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
   94          : "r" (token), "r" (&lock->slock)
  100  static inline int arch_spin_trylock(arch_spinlock_t *lock)
  103          return __arch_spin_trylock(lock) == 0;
  123  extern void __spin_yield(arch_spinlock_t *lock);
  124  extern void __rw_yield(arch_rwlock_t *lock);
       [all …]

/Linux-v4.19/tools/lib/lockdep/include/liblockdep/

D | rwlock.h |
   21  static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
   26          lockdep_init_map(&lock->dep_map, name, key, 0);
   28          return pthread_rwlock_init(&lock->rwlock, attr);
   31  #define liblockdep_pthread_rwlock_init(lock, attr) \
   35          __rwlock_init((lock), #lock, &__key, (attr)); \
   38  static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
   40          lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
   41          return pthread_rwlock_rdlock(&lock->rwlock);
   45  static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock)
   47          lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_);
       [all …]
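
The wrapper pairs every pthread_rwlock operation with a lockdep map, so user-space lock ordering can be validated the same way kernel locks are. A hypothetical usage sketch, assuming the liblockdep headers are on the include path:

    #include <stddef.h>
    #include "liblockdep/rwlock.h"

    static liblockdep_pthread_rwlock_t map_lock;

    static int demo_init(void)
    {
            /* the macro also creates a static lock_class_key for lockdep */
            return liblockdep_pthread_rwlock_init(&map_lock, NULL);
    }

    static void demo_reader(void)
    {
            liblockdep_pthread_rwlock_rdlock(&map_lock);
            /* ... read shared state; lockdep records the acquisition ... */
            liblockdep_pthread_rwlock_unlock(&map_lock);
    }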