Lines matching "lock" in the btrfs locking code

Extent buffer locking is built on a rw_semaphore, which provides exactly the
semantics the tree needs: reader/writer exclusion, writer/writer exclusion,
reader/reader sharing, and try-lock semantics for readers and writers.
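As a quick illustration of those four semantics with the stock rwsem API (a
standalone sketch, not code from this file; demo_lock and demo_semantics are
made up):

        #include <linux/rwsem.h>

        static DECLARE_RWSEM(demo_lock);

        static void demo_semantics(void)
        {
                down_read(&demo_lock);          /* readers share the lock */
                up_read(&demo_lock);

                down_write(&demo_lock);         /* excludes readers and other writers */
                up_write(&demo_lock);

                if (down_read_trylock(&demo_lock))      /* try-lock, reader side */
                        up_read(&demo_lock);
                if (down_write_trylock(&demo_lock))     /* try-lock, writer side */
                        up_write(&demo_lock);
        }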
__btrfs_tree_read_lock - lock extent buffer for read

This takes the read lock on the extent buffer, using the specified nesting
level for lockdep purposes; the core of it is:

        down_read_nested(&eb->lock, nest);
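Put together, the helper plausibly looks like the sketch below; the
tracepoint calls and the ktime bookkeeping are assumptions based on how the
rest of btrfs instruments its locks, and btrfs_tree_read_lock() is the
nesting-unaware wrapper that passes BTRFS_NESTING_NORMAL.

        void __btrfs_tree_read_lock(struct extent_buffer *eb,
                                    enum btrfs_lock_nesting nest)
        {
                u64 start_ns = 0;

                if (trace_btrfs_tree_read_lock_enabled())
                        start_ns = ktime_get_ns();

                down_read_nested(&eb->lock, nest);
                trace_btrfs_tree_read_lock(eb, start_ns);
        }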
Try-lock for read: btrfs_try_tree_read_lock() attempts
down_read_trylock(&eb->lock) and returns 1 if the lock was taken, 0
otherwise.
Try-lock for write: btrfs_try_tree_write_lock() attempts
down_write_trylock(&eb->lock) with the same return convention. Both helpers
are sketched below.
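A sketch of both try-lock helpers; the tracepoints and the lock_owner
bookkeeping on the write side are assumptions carried over from the blocking
write-lock path.

        int btrfs_try_tree_read_lock(struct extent_buffer *eb)
        {
                if (down_read_trylock(&eb->lock)) {
                        trace_btrfs_try_tree_read_lock(eb);
                        return 1;
                }
                return 0;
        }

        int btrfs_try_tree_write_lock(struct extent_buffer *eb)
        {
                if (down_write_trylock(&eb->lock)) {
                        eb->lock_owner = current->pid;  /* record owner for debugging */
                        trace_btrfs_try_tree_write_lock(eb);
                        return 1;
                }
                return 0;
        }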
Release the read lock: btrfs_tree_read_unlock() does up_read(&eb->lock).
__btrfs_tree_lock - lock eb for write
@eb:   the eb to lock
@nest: the nesting to use for the lock

Returns with eb->lock write locked. The function carries an
__acquires(&eb->lock) annotation for sparse and takes the lock with
down_write_nested(&eb->lock, nest).
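A sketch of the write-lock helper under the same assumptions (tracepoints,
lock_owner); the __acquires() annotation tells sparse the lock is held on
return.

        void __btrfs_tree_lock(struct extent_buffer *eb,
                               enum btrfs_lock_nesting nest)
                __acquires(&eb->lock)
        {
                u64 start_ns = 0;

                if (trace_btrfs_tree_lock_enabled())
                        start_ns = ktime_get_ns();

                down_write_nested(&eb->lock, nest);
                eb->lock_owner = current->pid;
                trace_btrfs_tree_lock(eb, start_ns);
        }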
Release the write lock: btrfs_tree_unlock() does up_write(&eb->lock).
This releases any locks held in the path, starting at the given level and
going all the way up to the root. btrfs_search_slot will keep the lock held
on higher nodes in a few corner cases, such as COW of the block at slot zero
in a node, so this should only be called once there are no more updates to
be done higher up in the tree.
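The release walk that comment describes might look like the following
sketch, assuming a btrfs_path with per-level nodes[] and locks[] arrays;
btrfs_tree_unlock_rw() and BTRFS_MAX_LEVEL follow fs/btrfs naming, but treat
the body as illustrative.

        void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
        {
                int i;

                for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                        if (!path->nodes[i])
                                continue;       /* nothing cached at this level */
                        if (!path->locks[i])
                                continue;       /* cached but not locked */
                        btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
                        path->locks[i] = 0;
                }
        }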
Loop around taking references on and locking the root node of the tree
until we end up with a lock on the root node.

Return: root extent buffer with write lock held
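The loop itself is short; a sketch, assuming btrfs_root_node() returns a
referenced copy of root->node, so comparing against root->node detects a COW
of the root that happened while we slept on the lock:

        struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
        {
                struct extent_buffer *eb;

                while (1) {
                        eb = btrfs_root_node(root);     /* grabs a reference */
                        btrfs_tree_lock(eb);
                        if (eb == root->node)
                                return eb;              /* still the root, done */
                        btrfs_tree_unlock(eb);          /* root changed under us */
                        free_extent_buffer(eb);
                }
        }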
The read-side variant loops the same way until we end up with a read lock on
the root node; the sketch above applies with btrfs_tree_read_lock() and
btrfs_tree_read_unlock() swapped in.

Return: root extent buffer with read lock held
DREW stands for double-reader-writer-exclusion lock. It's used in situations
where you want to provide A-B exclusion between two classes of users, but no
exclusion within a class. The current implementation gives more priority to
readers: if a writer and a reader both race to acquire their respective
sides of the lock, the writer yields its lock as soon as it detects a
concurrent reader, and while there are pending readers no new writer is
allowed to come in and acquire the lock.
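The operations below only touch four fields, so the lock structure is
roughly the following (a sketch; the exact layout and field order are
assumptions):

        struct btrfs_drew_lock {
                atomic_t                readers;         /* active plus pending readers */
                struct percpu_counter   writers;         /* cheap inc/dec, summed by readers */
                wait_queue_head_t       pending_writers; /* writers waiting for readers to drain */
                wait_queue_head_t       pending_readers; /* readers waiting for writers to drain */
        };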
int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
        int ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);

        if (ret)
                return ret;
        atomic_set(&lock->readers, 0);
        init_waitqueue_head(&lock->pending_readers);
        init_waitqueue_head(&lock->pending_writers);
        return 0;
}
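Because percpu_counter_init() allocates per-CPU storage, initialization can
fail and must be checked; a hypothetical caller:

        /* demo_setup() is hypothetical; only the init/destroy pairing matters */
        static int demo_setup(struct btrfs_drew_lock *lock)
        {
                int ret = btrfs_drew_lock_init(lock);

                if (ret)
                        return ret;     /* typically -ENOMEM */
                /* ... use the lock; pair with btrfs_drew_lock_destroy() on teardown */
                return 0;
        }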
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
{
        percpu_counter_destroy(&lock->writers);
}
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
        if (atomic_read(&lock->readers))
                return false;
        percpu_counter_inc(&lock->writers);
        smp_mb();       /* make the inc visible before re-checking readers */
        if (atomic_read(&lock->readers)) {
                btrfs_drew_write_unlock(lock);  /* lost the race to a reader */
                return false;
        }
        return true;
}
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
        while (!btrfs_drew_try_write_lock(lock))
                wait_event(lock->pending_writers, !atomic_read(&lock->readers));
}
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
        percpu_counter_dec(&lock->writers);
        cond_wake_up(&lock->pending_readers);
}
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
        atomic_inc(&lock->readers);
        smp_mb__after_atomic(); /* pairs with the smp_mb() in the try-write path */
        wait_event(lock->pending_readers,
                   percpu_counter_sum(&lock->writers) == 0);
}
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
        /*
         * atomic_dec_and_test() implies a full barrier, so a woken writer is
         * guaranteed to see the reader count drop.
         */
        if (atomic_dec_and_test(&lock->readers))
                wake_up(&lock->pending_writers);
}
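Putting the two sides together, a hypothetical usage in the A-B-exclusion
spirit described above (btrfs itself uses a DREW lock to let NOCOW writers
run concurrently while excluding snapshot creation; the names here are made
up, and demo_drew is assumed to be initialized):

        static struct btrfs_drew_lock demo_drew;

        static void demo_writer_side(void)
        {
                btrfs_drew_write_lock(&demo_drew);      /* blocks until no readers */
                /* exclusive vs. all readers, but other writers may share */
                btrfs_drew_write_unlock(&demo_drew);
        }

        static void demo_reader_side(void)
        {
                btrfs_drew_read_lock(&demo_drew);       /* blocks until no writers */
                /* many readers may run concurrently here */
                btrfs_drew_read_unlock(&demo_drew);
        }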