Lines Matching +full:unlock +full:- +full:keys

1 /* SPDX-License-Identifier: GPL-2.0 */
8 * At a high level, bcache's btree is a relatively standard b+ tree. All keys and
13 * the key is the highest key in the child node - except that the highest key in
15 * of the child node - this would allow us to have variable sized btree nodes
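The lines above describe the key layout: each key in an interior node is the highest key in the child node it points to. A minimal sketch of a lookup descending under that invariant - the xnode type and field names are made up for illustration, not bcache's bkey/btree structures:

#include <stdint.h>

/* Hypothetical interior node: key[i] is the highest key stored under child[i]. */
struct xnode {
	int		nr_keys;
	uint64_t	key[16];
	struct xnode	*child[16];	/* NULL at the leaf level */
};

/*
 * Descend to the first child whose highest key covers the search key;
 * because key[i] is the maximum under child[i], that child is the only
 * one that can contain 'search'.
 */
static struct xnode *xnode_child_for(struct xnode *n, uint64_t search)
{
	int i;

	for (i = 0; i < n->nr_keys; i++)
		if (search <= n->key[i])
			return n->child[i];

	return NULL;	/* search key is larger than anything below n */
}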
21 * overlapping extents - when we read in a btree node from disk, the first thing
22 * we do is resort all the sets of keys with a mergesort, and in the same pass
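The two lines above describe resorting a node's several sorted key sets with a mergesort as the node is read in. A toy sketch of one merge step, assuming plain integer keys; the real pass works on bkeys and also resolves the overlapping extents mentioned above, which this leaves out:

#include <stddef.h>
#include <stdint.h>

/* Merge two sorted key arrays into 'out' and return the merged length. */
static size_t merge_key_sets(const uint64_t *a, size_t na,
			     const uint64_t *b, size_t nb,
			     uint64_t *out)
{
	size_t i = 0, j = 0, n = 0;

	while (i < na && j < nb)
		out[n++] = (a[i] <= b[j]) ? a[i++] : b[j++];
	while (i < na)
		out[n++] = a[i++];
	while (j < nb)
		out[n++] = b[j++];

	return n;
}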
35 * disk if necessary. This function is almost never called directly though - the
37 * unlock the node after the function returns.
39 * The root is special cased - it's taken out of the cache's lru (thus pinning
43 * points to - the btree_root() macro handles this.
49 * time, so there's a lock, implemented by a pointer to the btree_op closure -
57 * For writing, we have two btree_write structs embedded in struct btree - one
60 * Writing is done with a single function - bch_btree_write() really serves two
62 * passing now = false, it merely indicates that the node is now dirty - calling
63 * it ensures that the dirty keys will be written at some point in the future.
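As described above, calling the write function with now = false only marks the node dirty and defers the actual write. A rough sketch of that split, using a hypothetical dnode type and leaving the later-flush policy unspecified:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical node with just a dirty flag; not bcache's struct btree. */
struct dnode {
	bool dirty;
};

static void dnode_write(struct dnode *n)
{
	printf("writing node %p\n", (void *)n);
	n->dirty = false;
}

/*
 * now == true: write the node out immediately.
 * now == false: only mark it dirty; a later flush will find the flag set
 * and write the keys out at some point in the future.
 */
static void dnode_mark_dirty_or_write(struct dnode *n, bool now)
{
	n->dirty = true;
	if (now)
		dnode_write(n);
}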
68 * though - but it takes a refcount on the closure in struct btree_op you passed
77 * When traversing the btree, we may need write locks starting at some level -
86 * then it must restart from the root and take new locks - to do this it changes
87 * the lock field and returns -EINTR, which causes the btree_root() macro to
93 * placeholder key to detect races - otherwise, we could race with a write and
97 * For this we use a sequence number that write locks and unlocks increment - to
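The sequence-number trick above can be sketched in a few lines: bump a counter on every write lock and unlock, and let a caller who dropped the lock compare the counter to decide whether the node changed underneath it. The seq_node type and pthread rwlock are stand-ins, not bcache's struct btree (compare rw_lock()/rw_unlock() further down, which do the same b->seq++):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct seq_node {
	pthread_rwlock_t lock;
	uint32_t seq;
};

static void seq_node_write_lock(struct seq_node *n)
{
	pthread_rwlock_wrlock(&n->lock);
	n->seq++;			/* writers always change the sequence */
}

static void seq_node_write_unlock(struct seq_node *n)
{
	n->seq++;
	pthread_rwlock_unlock(&n->lock);
}

/*
 * Caller pattern: remember n->seq while the node is locked, drop the
 * lock to do slow work, then check the sequence again - if it moved, a
 * writer got in and the operation has to be redone.
 */
static bool seq_node_unchanged(struct seq_node *n, uint32_t old_seq)
{
	return n->seq == old_seq;
}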
110 * c->prio_blocked because we can't write the gens until the new
135 struct btree_keys keys; /* member of struct btree */
137 /* For outstanding btree writes, used as a lock - protects write_idx */
153 { return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
156 { set_bit(BTREE_NODE_ ## flag, &b->flags); }
172 return b->writes + btree_node_write_idx(b); /* in btree_current_write() */
177 return b->writes + (btree_node_write_idx(b) ^ 1); /* in btree_prev_write() */
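btree_current_write()/btree_prev_write() above index a two-element writes[] array with a single bit, so one slot can be filling with new keys while the other is being written out. A simplified sketch of that double buffering with hypothetical types (the real index bit is a node flag, per the BTREE_FLAG helpers above):

/* Hypothetical per-node write slots; not bcache's struct btree_write. */
struct xwrite {
	int pending;		/* stand-in for the queued dirty keys */
};

struct wnode {
	struct xwrite	writes[2];
	unsigned int	write_idx;	/* 0 or 1 */
};

static struct xwrite *wnode_current_write(struct wnode *n)
{
	return n->writes + n->write_idx;	/* new insertions go here */
}

static struct xwrite *wnode_prev_write(struct wnode *n)
{
	return n->writes + (n->write_idx ^ 1);	/* the slot being flushed */
}

/*
 * Kicking off a write: flip the index so insertions land in the other
 * slot while the previously-current one is submitted to disk.
 */
static struct xwrite *wnode_start_write(struct wnode *n)
{
	n->write_idx ^= 1;
	return wnode_prev_write(n);
}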
182 return b->keys.set->data; /* in btree_bset_first() */
187 return bset_tree_last(&b->keys)->data; /* in btree_bset_last() */
192 return bset_sector_offset(&b->keys, i) >> b->c->block_bits; /* in bset_block_offset() */
197 atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16); /* in set_gc_sectors() */
206 iter < ARRAY_SIZE((c)->bucket_hash); \
208 hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
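for_each_cached_btree() above is a nested-loop macro: an outer loop over the hash buckets and an inner hlist walk within each bucket. A self-contained sketch of the same shape, with plain pointers instead of hlist/RCU and made-up names:

#include <stddef.h>

#define XARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Hypothetical open-hashing table: an array of singly linked buckets. */
struct xhnode {
	struct xhnode *next;
};

struct xtable {
	struct xhnode *bucket[64];
};

/*
 * Walk every entry in every bucket.  As with any nested-loop macro of
 * this shape, a bare 'break' in the body only leaves the inner loop.
 */
#define xtable_for_each(n, t, iter)					\
	for ((iter) = 0; (iter) < XARRAY_SIZE((t)->bucket); (iter)++)	\
		for ((n) = (t)->bucket[(iter)]; (n); (n) = (n)->next)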
244 init_wait(&op->wait); /* in bch_btree_op_init() */
245 op->lock = write_lock_level; /* in bch_btree_op_init() */
250 w ? down_write_nested(&b->lock, level + 1) /* in rw_lock() */
251 : down_read_nested(&b->lock, level + 1); /* in rw_lock() */
253 b->seq++; /* in rw_lock() */
259 b->seq++; /* in rw_unlock() */
260 (w ? up_write : up_read)(&b->lock); /* in rw_unlock() */
277 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
288 wake_up(&c->gc_wait); /* in wake_up_gc() */
297 * Therefore sectors_to_gc is set to -1 here, before waking up
301 * that c->sectors_to_gc being set to some other positive value. So
305 atomic_set(&c->sectors_to_gc, -1); /* in force_wake_up_gc() */
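force_wake_up_gc() above works because the GC thread only does work while sectors_to_gc is negative, so the counter has to be forced to -1 before the wakeup or the thread would just go back to sleep. A sketch of that ordering with made-up types (atomic_int instead of the kernel's atomic_t, and a stub wake function):

#include <stdatomic.h>

/* Hypothetical cache state: the counter is decremented as sectors are
 * written and the GC thread only runs once it drops below zero. */
struct xcache {
	atomic_int sectors_to_gc;
};

static void xgc_wake(struct xcache *c)
{
	/* stand-in for wake_up(&c->gc_wait): the GC thread re-checks
	 * sectors_to_gc on wakeup and sleeps again if it is still >= 0 */
	(void)c;
}

/*
 * Force a GC pass: make the trigger condition true *before* waking the
 * thread; waking first would be a lost wakeup, since the thread would
 * see a positive counter and go back to sleep.
 */
static void xgc_force_wake(struct xcache *c)
{
	atomic_store(&c->sectors_to_gc, -1);
	xgc_wake(c);
}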
310 * These macros are for recursing down the btree - they handle the details of
314 * op->lock determines whether we take a read or a write lock at a given depth.
316 * going to have to split), set op->lock and return -EINTR; btree_root() will
321 * btree - recurse down the btree on a specified key
329 int _r, l = (b)->level - 1; \
330 bool _w = l <= (op)->lock; \
331 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
342 * btree_root - call a function on the root of the btree
349 int _r = -EINTR; \
351 struct btree *_b = (c)->root; \
353 rw_lock(_w, _b, _b->level); \
354 if (_b == (c)->root && \
360 if (_r == -EINTR) \
362 } while (_r == -EINTR); \
364 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
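The btree_root() macro above is a retry loop: lock what looks like the root, verify the root pointer did not move while the lock was being taken, call the body, and restart from the top whenever it returns -EINTR. A stripped-down sketch of the same control flow with hypothetical types; it always takes a read lock and omits the op->lock level selection and the btree_cache_wait handling:

#include <errno.h>
#include <pthread.h>

/* Hypothetical tree with a movable root pointer; not bcache's cache_set. */
struct rnode {
	pthread_rwlock_t lock;
};

struct rtree {
	struct rnode *root;
};

/*
 * Lock the presumed root, confirm the pointer did not change while we
 * were blocked on the lock, run the operation, and start over whenever
 * it reports -EINTR (e.g. because it discovered it needs write locks
 * higher up the tree than it is holding).
 */
static int rtree_with_root(struct rtree *t, int (*fn)(struct rnode *))
{
	int r;

	do {
		struct rnode *b = t->root;

		pthread_rwlock_rdlock(&b->lock);
		if (b == t->root)
			r = fn(b);
		else
			r = -EINTR;	/* root was replaced; try again */
		pthread_rwlock_unlock(&b->lock);
	} while (r == -EINTR);

	return r;
}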