Lines Matching +full:blocking +full:- +full:io

1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/page-flags.h>
26 * - reader/writer exclusion
27 * - writer/writer exclusion
28 * - reader/reader sharing
29 * - spinning lock semantics
30 * - blocking lock semantics
31 * - try-lock semantics for readers and writers
31 * - one level nesting, allowing read lock to be taken by the same thread that already holds the write lock
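A note on the last item: nesting only works once the write lock has been made blocking, because the read path checks eb->lock_owner and eb->blocking_writers (see __btrfs_tree_read_lock further down). A minimal sketch of a nested caller, with a made-up function name and an extent buffer assumed to be referenced elsewhere:

static void demo_nested_read(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);			/* write lock, records lock_owner */
	btrfs_set_lock_blocking_write(eb);	/* required before nesting a read lock */

	btrfs_tree_read_lock(eb);		/* allowed: same thread, sets lock_recursed */
	/* ... read-only access to keys/items ... */
	btrfs_tree_read_unlock(eb);		/* only clears lock_recursed for the owner */

	btrfs_tree_unlock(eb);			/* drops the blocking write lock */
}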
36 * related to the storage in the b-tree (keys, items, but not the individual members of the extent buffer)
39 * is done by read-write spinlock and the blocking part is implemented using counters and wait queues
42 * spinning semantics - the low-level rwlock is held so all other threads that want to take it are spinning on it
45 * blocking semantics - the low-level rwlock is not held but the counter
46 * denotes how many times the blocking lock was held; the holders are allowed to sleep
53 * ---------
56 * removed from non-debug build to reduce extent_buffer size and for performance reasons
61 * --------------
79 * Locking pattern - spinning
80 * --------------------------
82 * The simple locking scenario; the +--+ denotes the spinning section.
84 * +- btrfs_tree_lock
85 * | - extent_buffer::rwlock is held
86 * | - no heavy operations should happen, eg. IO, memory allocations, large structure traversals
88 * +- btrfs_tree_unlock
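In code, the spinning pattern is just a plain lock/unlock pair around short, non-sleeping work. An illustrative sketch (the function name is made up):

static void demo_spinning_write(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);	/* eb->lock is held, contending threads spin */
	/* ... short key/item updates, no IO and no allocations ... */
	btrfs_tree_unlock(eb);
}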
91 * Locking pattern - blocking
92 * --------------------------
94 * The blocking write uses the following scheme. The +--+ denotes the spinning section.
97 * +- btrfs_tree_lock
99 * +- btrfs_set_lock_blocking_write
101 * - allowed: IO, memory allocations, etc.
103 * -- btrfs_tree_unlock - note, no explicit unblocking necessary
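As a sketch, the blocking-write pattern converts the spinning lock before any operation that may sleep; the final btrfs_tree_unlock releases either form (the function name is illustrative):

static void demo_blocking_write(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);			/* spinning section */
	btrfs_set_lock_blocking_write(eb);	/* drops the rwlock, sets blocking_writers */

	/* IO, memory allocations and other sleeping operations are allowed here */

	btrfs_tree_unlock(eb);			/* no explicit unblocking call needed */
}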
106 * Blocking read is similar.
108 * +- btrfs_tree_read_lock
110 * +- btrfs_set_lock_blocking_read
112 * - heavy operations allowed
114 * +- btrfs_tree_read_unlock_blocking
116 * +- btrfs_tree_read_unlock
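A corresponding sketch of the blocking read side; a reader that converted its lock to blocking releases it with btrfs_tree_read_unlock_blocking, while a purely spinning reader uses btrfs_tree_read_unlock (the function name is illustrative):

static void demo_blocking_read(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);		/* spinning read lock */
	btrfs_set_lock_blocking_read(eb);	/* drops the rwlock, bumps blocking_readers */

	/* heavy operations allowed */

	btrfs_tree_read_unlock_blocking(eb);	/* pairs with the blocking conversion */
}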
123 WARN_ON(eb->spinning_writers); in btrfs_assert_spinning_writers_get()
124 eb->spinning_writers++; in btrfs_assert_spinning_writers_get()
129 WARN_ON(eb->spinning_writers != 1); in btrfs_assert_spinning_writers_put()
130 eb->spinning_writers--; in btrfs_assert_spinning_writers_put()
135 WARN_ON(eb->spinning_writers); in btrfs_assert_no_spinning_writers()
140 atomic_inc(&eb->spinning_readers); in btrfs_assert_spinning_readers_get()
145 WARN_ON(atomic_read(&eb->spinning_readers) == 0); in btrfs_assert_spinning_readers_put()
146 atomic_dec(&eb->spinning_readers); in btrfs_assert_spinning_readers_put()
151 atomic_inc(&eb->read_locks); in btrfs_assert_tree_read_locks_get()
156 atomic_dec(&eb->read_locks); in btrfs_assert_tree_read_locks_put()
161 BUG_ON(!atomic_read(&eb->read_locks)); in btrfs_assert_tree_read_locked()
166 eb->write_locks++; in btrfs_assert_tree_write_locks_get()
171 eb->write_locks--; in btrfs_assert_tree_write_locks_put()
188 * Mark already held read lock as blocking. Can be nested in write lock by the same thread.
194 * The rwlock is released and blocking reader counter is increased.
204 if (eb->lock_recursed && current->pid == eb->lock_owner) in btrfs_set_lock_blocking_read()
207 atomic_inc(&eb->blocking_readers); in btrfs_set_lock_blocking_read()
209 read_unlock(&eb->lock); in btrfs_set_lock_blocking_read()
213 * Mark already held write lock as blocking.
218 * The rwlock is released and blocking writers is set.
228 if (eb->lock_recursed && current->pid == eb->lock_owner) in btrfs_set_lock_blocking_write()
230 if (eb->blocking_writers == 0) { in btrfs_set_lock_blocking_write()
233 WRITE_ONCE(eb->blocking_writers, 1); in btrfs_set_lock_blocking_write()
234 write_unlock(&eb->lock); in btrfs_set_lock_blocking_write()
239 * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
255 read_lock(&eb->lock); in __btrfs_tree_read_lock()
256 BUG_ON(eb->blocking_writers == 0 && in __btrfs_tree_read_lock()
257 current->pid == eb->lock_owner); in __btrfs_tree_read_lock()
258 if (eb->blocking_writers) { in __btrfs_tree_read_lock()
259 if (current->pid == eb->lock_owner) { in __btrfs_tree_read_lock()
261 * This extent is already write-locked by our thread. in __btrfs_tree_read_lock()
265 * (write-)locked tree. in __btrfs_tree_read_lock()
268 BUG_ON(eb->lock_recursed); in __btrfs_tree_read_lock()
269 eb->lock_recursed = true; in __btrfs_tree_read_lock()
270 read_unlock(&eb->lock); in __btrfs_tree_read_lock()
274 read_unlock(&eb->lock); in __btrfs_tree_read_lock()
275 wait_event(eb->write_lock_wq, in __btrfs_tree_read_lock()
276 READ_ONCE(eb->blocking_writers) == 0); in __btrfs_tree_read_lock()
291 * contending blocking writers. If there are, don't wait.
297 if (READ_ONCE(eb->blocking_writers)) in btrfs_tree_read_lock_atomic()
300 read_lock(&eb->lock); in btrfs_tree_read_lock_atomic()
302 if (READ_ONCE(eb->blocking_writers)) { in btrfs_tree_read_lock_atomic()
303 read_unlock(&eb->lock); in btrfs_tree_read_lock_atomic()
313 * Try-lock for read. Don't block or wait for contending writers.
319 if (READ_ONCE(eb->blocking_writers)) in btrfs_try_tree_read_lock()
322 if (!read_trylock(&eb->lock)) in btrfs_try_tree_read_lock()
326 if (READ_ONCE(eb->blocking_writers)) { in btrfs_try_tree_read_lock()
327 read_unlock(&eb->lock); in btrfs_try_tree_read_lock()
337 * Try-lock for write. May block until the lock is uncontended, but does not wait until it is free.
344 if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) in btrfs_try_tree_write_lock()
347 write_lock(&eb->lock); in btrfs_try_tree_write_lock()
349 if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) { in btrfs_try_tree_write_lock()
350 write_unlock(&eb->lock); in btrfs_try_tree_write_lock()
355 eb->lock_owner = current->pid; in btrfs_try_tree_write_lock()
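Callers of the try-lock variants must cope with failure, typically by falling back to the full (waiting) lock. An illustrative sketch with a made-up caller:

static void demo_try_write(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_write_lock(eb))	/* returns 0 when contended */
		btrfs_tree_lock(eb);		/* fall back and wait */

	/* ... */
	btrfs_tree_unlock(eb);
}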
375 if (eb->lock_recursed && current->pid == eb->lock_owner) { in btrfs_tree_read_unlock()
376 eb->lock_recursed = false; in btrfs_tree_read_unlock()
382 read_unlock(&eb->lock); in btrfs_tree_read_unlock()
386 * Release read lock, previously set to blocking by a pairing call to btrfs_set_lock_blocking_read().
401 if (eb->lock_recursed && current->pid == eb->lock_owner) { in btrfs_tree_read_unlock_blocking()
402 eb->lock_recursed = false; in btrfs_tree_read_unlock_blocking()
406 WARN_ON(atomic_read(&eb->blocking_readers) == 0); in btrfs_tree_read_unlock_blocking()
408 if (atomic_dec_and_test(&eb->blocking_readers)) in btrfs_tree_read_unlock_blocking()
409 cond_wake_up_nomb(&eb->read_lock_wq); in btrfs_tree_read_unlock_blocking()
414 * Lock for write. Wait for all blocking and spinning readers and writers. This also records the calling thread as lock owner, so a read lock can later be nested by the same thread.
420 __acquires(&eb->lock) in __btrfs_tree_lock()
427 WARN_ON(eb->lock_owner == current->pid); in __btrfs_tree_lock()
429 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); in __btrfs_tree_lock()
430 wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0); in __btrfs_tree_lock()
431 write_lock(&eb->lock); in __btrfs_tree_lock()
433 if (atomic_read(&eb->blocking_readers) || in __btrfs_tree_lock()
434 READ_ONCE(eb->blocking_writers)) { in __btrfs_tree_lock()
435 write_unlock(&eb->lock); in __btrfs_tree_lock()
440 eb->lock_owner = current->pid; in __btrfs_tree_lock()
450 * Release the write lock, either blocking or spinning (ie. there's no need
451 * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
463 int blockers = eb->blocking_writers; in btrfs_tree_unlock()
469 eb->lock_owner = 0; in btrfs_tree_unlock()
475 WRITE_ONCE(eb->blocking_writers, 0); in btrfs_tree_unlock()
481 cond_wake_up(&eb->write_lock_wq); in btrfs_tree_unlock()
484 write_unlock(&eb->lock); in btrfs_tree_unlock()
489 * Set all locked nodes in the path to blocking locks. This should be done before scheduling.
497 if (!p->nodes[i] || !p->locks[i]) in btrfs_set_path_blocking()
501 * will bump the count of blocking holders and drop the spinlock. in btrfs_set_path_blocking()
504 if (p->locks[i] == BTRFS_READ_LOCK) { in btrfs_set_path_blocking()
505 btrfs_set_lock_blocking_read(p->nodes[i]); in btrfs_set_path_blocking()
506 p->locks[i] = BTRFS_READ_LOCK_BLOCKING; in btrfs_set_path_blocking()
507 } else if (p->locks[i] == BTRFS_WRITE_LOCK) { in btrfs_set_path_blocking()
508 btrfs_set_lock_blocking_write(p->nodes[i]); in btrfs_set_path_blocking()
509 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; in btrfs_set_path_blocking()
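A typical caller holds spinning locks on a btrfs_path (e.g. after a tree search) and converts them all to blocking just before work that can sleep; the updated p->locks[] values make the later unlock calls release the right lock flavour. An illustrative sketch with a made-up caller:

static void demo_path_blocking(struct btrfs_path *path)
{
	/* path->nodes[] and path->locks[] were filled with spinning locks */
	btrfs_set_path_blocking(path);	/* bump blocking counts, drop the rwlocks */

	/* now it is safe to do IO, allocate memory or otherwise schedule */
}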
527 if (path->keep_locks) in btrfs_unlock_up_safe()
531 if (!path->nodes[i]) in btrfs_unlock_up_safe()
533 if (!path->locks[i]) in btrfs_unlock_up_safe()
535 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); in btrfs_unlock_up_safe()
536 path->locks[i] = 0; in btrfs_unlock_up_safe()
553 if (eb == root->node) in btrfs_lock_root_node()
575 if (eb == root->node) in __btrfs_read_lock_root_node()
587 * DREW stands for double-reader-writer-exclusion lock. It's used in situations
588 * where you want to provide A-B exclusion but not AA or BB.
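The DREW helpers defined below pair up as shown in this minimal usage sketch; the wrapper function is made up, only the btrfs_drew_* calls come from this file:

static int demo_drew_usage(struct btrfs_drew_lock *lock)
{
	int ret;

	ret = btrfs_drew_lock_init(lock);	/* sets up the percpu counter and wait queues */
	if (ret)
		return ret;

	/* A side: any number of these may run together, but they exclude the B side */
	btrfs_drew_write_lock(lock);
	/* ... A-type work ... */
	btrfs_drew_write_unlock(lock);

	/* B side: any number of these may run together, but they exclude the A side */
	btrfs_drew_read_lock(lock);
	/* ... B-type work ... */
	btrfs_drew_read_unlock(lock);

	btrfs_drew_lock_destroy(lock);
	return 0;
}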
601 ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL); in btrfs_drew_lock_init()
605 atomic_set(&lock->readers, 0); in btrfs_drew_lock_init()
606 init_waitqueue_head(&lock->pending_readers); in btrfs_drew_lock_init()
607 init_waitqueue_head(&lock->pending_writers); in btrfs_drew_lock_init()
614 percpu_counter_destroy(&lock->writers); in btrfs_drew_lock_destroy()
620 if (atomic_read(&lock->readers)) in btrfs_drew_try_write_lock()
623 percpu_counter_inc(&lock->writers); in btrfs_drew_try_write_lock()
627 if (atomic_read(&lock->readers)) { in btrfs_drew_try_write_lock()
640 wait_event(lock->pending_writers, !atomic_read(&lock->readers)); in btrfs_drew_write_lock()
646 percpu_counter_dec(&lock->writers); in btrfs_drew_write_unlock()
647 cond_wake_up(&lock->pending_readers); in btrfs_drew_write_unlock()
652 atomic_inc(&lock->readers); in btrfs_drew_read_lock()
662 wait_event(lock->pending_readers, in btrfs_drew_read_lock()
663 percpu_counter_sum(&lock->writers) == 0); in btrfs_drew_read_lock()
672 if (atomic_dec_and_test(&lock->readers)) in btrfs_drew_read_unlock()
673 wake_up(&lock->pending_writers); in btrfs_drew_read_unlock()