Lines matching refs: b (identifier cross-reference over drivers/md/bcache/btree.c)
102 #define insert_lock(s, b) ((b)->level <= (s)->lock) argument
105 static inline struct bset *write_block(struct btree *b) in write_block() argument
107 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache); in write_block()
110 static void bch_btree_init_next(struct btree *b) in bch_btree_init_next() argument
113 if (b->level && b->keys.nsets) in bch_btree_init_next()
114 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
116 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
118 if (b->written < btree_blocks(b)) in bch_btree_init_next()
119 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
120 bset_magic(&b->c->cache->sb)); in bch_btree_init_next()
137 static uint64_t btree_csum_set(struct btree *b, struct bset *i) in btree_csum_set() argument
139 uint64_t crc = b->key.ptr[0]; in btree_csum_set()
146 void bch_btree_node_read_done(struct btree *b) in bch_btree_node_read_done() argument
149 struct bset *i = btree_bset_first(b); in bch_btree_node_read_done()
157 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
158 iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; in bch_btree_node_read_done()
162 iter->b = &b->keys; in bch_btree_node_read_done()
169 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
170 i = write_block(b)) { in bch_btree_node_read_done()
176 if (b->written + set_blocks(i, block_bytes(b->c->cache)) > in bch_btree_node_read_done()
177 btree_blocks(b)) in bch_btree_node_read_done()
181 if (i->magic != bset_magic(&b->c->cache->sb)) in bch_btree_node_read_done()
191 if (i->csum != btree_csum_set(b, i)) in bch_btree_node_read_done()
197 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
202 b->written += set_blocks(i, block_bytes(b->c->cache)); in bch_btree_node_read_done()
206 for (i = write_block(b); in bch_btree_node_read_done()
207 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
208 i = ((void *) i) + block_bytes(b->c->cache)) in bch_btree_node_read_done()
209 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
212 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
214 i = b->keys.set[0].data; in bch_btree_node_read_done()
216 if (b->keys.set[0].size && in bch_btree_node_read_done()
217 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
220 if (b->written < btree_blocks(b)) in bch_btree_node_read_done()
221 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
222 bset_magic(&b->c->cache->sb)); in bch_btree_node_read_done()
224 mempool_free(iter, &b->c->fill_iter); in bch_btree_node_read_done()
227 set_btree_node_io_error(b); in bch_btree_node_read_done()
228 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
229 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
230 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
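
Taken together, write_block(), bch_btree_init_next() and bch_btree_node_read_done() above describe the on-disk layout of a btree node: one bucket holding consecutive sorted key sets (bsets), each padded out to whole blocks and tagged with a magic value, a sequence number shared by every set in the node, and a checksum. The following is a minimal user-space sketch of that read-side walk, not the kernel code: BLOCK_BYTES, NODE_BLOCKS, BSET_MAGIC and this struct bset are simplified stand-ins, and the checksum and sort steps are left out.

/* Simplified model: walk consecutive bsets in a node buffer and count
 * how many blocks hold valid data. Stand-in types, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define BLOCK_BYTES 512u          /* assumed block size */
#define NODE_BLOCKS 8u            /* assumed blocks per btree node */
#define BSET_MAGIC  0xb5e70000ULL /* placeholder magic value */

struct bset {                     /* simplified set header */
	uint64_t magic;
	uint64_t seq;                 /* same for every set in one node */
	uint32_t keys;                /* u64s of key data that follow */
	uint64_t d[];
};

static size_t set_bytes(const struct bset *i)
{
	return sizeof(*i) + (size_t)i->keys * sizeof(uint64_t);
}

static unsigned set_blocks(const struct bset *i)
{
	return (unsigned)((set_bytes(i) + BLOCK_BYTES - 1) / BLOCK_BYTES);
}

static unsigned node_read_done(const char *node)
{
	uint64_t seq = ((const struct bset *)node)->seq;
	unsigned written = 0;

	while (written < NODE_BLOCKS) {
		const struct bset *i =
			(const struct bset *)(node + written * BLOCK_BYTES);

		if (i->magic != BSET_MAGIC || i->seq != seq)
			break;                    /* ran past the valid data */
		if (written + set_blocks(i) > NODE_BLOCKS)
			break;                    /* set would overrun the node */
		written += set_blocks(i);     /* accept this set, keep walking */
	}
	return written;
}

int main(void)
{
	static uint64_t buf[NODE_BLOCKS * BLOCK_BYTES / sizeof(uint64_t)];
	struct bset *first = (struct bset *)buf;

	first->magic = BSET_MAGIC;
	first->seq = 42;
	first->keys = 10;
	printf("blocks written: %u\n", node_read_done((const char *)buf));
	return 0;
}

The real function additionally verifies each set's checksum, merges all sets with bch_btree_sort_and_fix_extents(), and reports any failure through bch_cache_set_error(), as the lines above show.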
241 static void bch_btree_node_read(struct btree *b) in bch_btree_node_read() argument
247 trace_bcache_btree_read(b); in bch_btree_node_read()
251 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
252 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
257 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
259 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
263 set_btree_node_io_error(b); in bch_btree_node_read()
265 bch_bbio_free(bio, b->c); in bch_btree_node_read()
267 if (btree_node_io_error(b)) in bch_btree_node_read()
270 bch_btree_node_read_done(b); in bch_btree_node_read()
271 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
275 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
276 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
279 static void btree_complete_write(struct btree *b, struct btree_write *w) in btree_complete_write() argument
282 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
283 wake_up_allocators(b->c); in btree_complete_write()
287 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
296 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_unlock() local
298 up(&b->io_mutex); in btree_node_write_unlock()
303 struct btree *b = container_of(cl, struct btree, io); in __btree_node_write_done() local
304 struct btree_write *w = btree_prev_write(b); in __btree_node_write_done()
306 bch_bbio_free(b->bio, b->c); in __btree_node_write_done()
307 b->bio = NULL; in __btree_node_write_done()
308 btree_complete_write(b, w); in __btree_node_write_done()
310 if (btree_node_dirty(b)) in __btree_node_write_done()
311 schedule_delayed_work(&b->work, 30 * HZ); in __btree_node_write_done()
318 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_done() local
320 bio_free_pages(b->bio); in btree_node_write_done()
327 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_endio() local
330 set_btree_node_io_error(b); in btree_node_write_endio()
332 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); in btree_node_write_endio()
336 static void do_btree_node_write(struct btree *b) in do_btree_node_write() argument
338 struct closure *cl = &b->io; in do_btree_node_write()
339 struct bset *i = btree_bset_last(b); in do_btree_node_write()
343 i->csum = btree_csum_set(b, i); in do_btree_node_write()
345 BUG_ON(b->bio); in do_btree_node_write()
346 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
348 b->bio->bi_end_io = btree_node_write_endio; in do_btree_node_write()
349 b->bio->bi_private = cl; in do_btree_node_write()
350 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); in do_btree_node_write()
351 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; in do_btree_node_write()
352 bch_bio_map(b->bio, i); in do_btree_node_write()
369 bkey_copy(&k.key, &b->key); in do_btree_node_write()
371 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
373 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { in do_btree_node_write()
378 bio_for_each_segment_all(bv, b->bio, iter_all) { in do_btree_node_write()
383 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
391 b->bio->bi_vcnt = 0; in do_btree_node_write()
392 bch_bio_map(b->bio, i); in do_btree_node_write()
394 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
401 void __bch_btree_node_write(struct btree *b, struct closure *parent) in __bch_btree_node_write() argument
403 struct bset *i = btree_bset_last(b); in __bch_btree_node_write()
405 lockdep_assert_held(&b->write_lock); in __bch_btree_node_write()
407 trace_bcache_btree_write(b); in __bch_btree_node_write()
410 BUG_ON(b->written >= btree_blocks(b)); in __bch_btree_node_write()
411 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
412 BUG_ON(btree_bset_first(b)->seq != i->seq); in __bch_btree_node_write()
413 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
415 cancel_delayed_work(&b->work); in __bch_btree_node_write()
418 down(&b->io_mutex); in __bch_btree_node_write()
419 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
421 clear_bit(BTREE_NODE_dirty, &b->flags); in __bch_btree_node_write()
422 change_bit(BTREE_NODE_write_idx, &b->flags); in __bch_btree_node_write()
424 do_btree_node_write(b); in __bch_btree_node_write()
426 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, in __bch_btree_node_write()
427 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); in __bch_btree_node_write()
429 b->written += set_blocks(i, block_bytes(b->c->cache)); in __bch_btree_node_write()
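
__bch_btree_node_write() and do_btree_node_write() above cover the write side: checksum the open set (seeded from the node's key pointer, per btree_csum_set()), pad it to whole blocks, issue it as a FUA metadata write, and advance b->written so the next set starts on the following block boundary. Below is a compressed sketch of just that bookkeeping; checksum64() and submit_write() are hypothetical stand-ins, and the journalling, closure and bounce-buffer handling visible in the listing are omitted.

/* Sketch of the write-side bookkeeping: checksum the open set, round it
 * up to whole blocks, submit it, and advance the written-block count.
 * checksum64() and submit_write() are stand-ins, not bcache helpers. */
#include <stdint.h>
#include <stddef.h>

#define BLOCK_BYTES 512u
#define NODE_BLOCKS 8u

struct bset {
	uint64_t csum;
	uint64_t magic;
	uint64_t seq;
	uint32_t keys;
	uint64_t d[];
};

struct node {
	unsigned written;                             /* blocks already on disk */
	uint64_t data[NODE_BLOCKS * BLOCK_BYTES / 8]; /* node buffer */
};

static uint64_t checksum64(const void *p, size_t len)
{
	const unsigned char *c = p;
	uint64_t h = 1469598103934665603ULL;          /* FNV-1a as a placeholder */

	while (len--)
		h = (h ^ *c++) * 1099511628211ULL;
	return h;
}

static void submit_write(const void *buf, size_t len, unsigned block_off)
{
	/* stand-in for building a bio and issuing REQ_OP_WRITE | REQ_FUA */
	(void)buf; (void)len; (void)block_off;
}

void node_write(struct node *b)
{
	struct bset *i = (struct bset *)((char *)b->data + b->written * BLOCK_BYTES);
	size_t bytes  = sizeof(*i) + (size_t)i->keys * sizeof(uint64_t);
	size_t padded = (bytes + BLOCK_BYTES - 1) / BLOCK_BYTES * BLOCK_BYTES;

	/* checksum everything in the set except the csum field itself */
	i->csum = checksum64(&i->magic, bytes - sizeof(i->csum));

	submit_write(i, padded, b->written);
	b->written += padded / BLOCK_BYTES;           /* next set starts here */
}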
432 void bch_btree_node_write(struct btree *b, struct closure *parent) in bch_btree_node_write() argument
434 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
436 lockdep_assert_held(&b->lock); in bch_btree_node_write()
438 __bch_btree_node_write(b, parent); in bch_btree_node_write()
444 if (nsets && !b->keys.nsets) in bch_btree_node_write()
445 bch_btree_verify(b); in bch_btree_node_write()
447 bch_btree_init_next(b); in bch_btree_node_write()
450 static void bch_btree_node_write_sync(struct btree *b) in bch_btree_node_write_sync() argument
456 mutex_lock(&b->write_lock); in bch_btree_node_write_sync()
457 bch_btree_node_write(b, &cl); in bch_btree_node_write_sync()
458 mutex_unlock(&b->write_lock); in bch_btree_node_write_sync()
465 struct btree *b = container_of(to_delayed_work(w), struct btree, work); in btree_node_write_work() local
467 mutex_lock(&b->write_lock); in btree_node_write_work()
468 if (btree_node_dirty(b)) in btree_node_write_work()
469 __bch_btree_node_write(b, NULL); in btree_node_write_work()
470 mutex_unlock(&b->write_lock); in btree_node_write_work()
473 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) in bch_btree_leaf_dirty() argument
475 struct bset *i = btree_bset_last(b); in bch_btree_leaf_dirty()
476 struct btree_write *w = btree_current_write(b); in bch_btree_leaf_dirty()
478 lockdep_assert_held(&b->write_lock); in bch_btree_leaf_dirty()
480 BUG_ON(!b->written); in bch_btree_leaf_dirty()
483 if (!btree_node_dirty(b)) in bch_btree_leaf_dirty()
484 schedule_delayed_work(&b->work, 30 * HZ); in bch_btree_leaf_dirty()
486 set_btree_node_dirty(b); in bch_btree_leaf_dirty()
495 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
509 bch_btree_node_write(b, NULL); in bch_btree_leaf_dirty()
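
bch_btree_leaf_dirty() above implements a simple write-back policy: the insert that first dirties a clean leaf arms a 30 second delayed write (btree_node_write_work() later flushes it), while inserts into an already-dirty leaf leave the pending timer alone. A tiny sketch of that policy follows; schedule_flush_in() is a hypothetical stand-in for schedule_delayed_work(&b->work, 30 * HZ), and the journal-pin handling in the listing is omitted.

/* Sketch of the dirty-leaf write-back policy: only the transition from
 * clean to dirty arms the delayed flush. schedule_flush_in() is a
 * hypothetical stand-in for schedule_delayed_work(). */
#include <stdbool.h>
#include <stdio.h>

struct leaf {
	bool dirty;
};

static void schedule_flush_in(struct leaf *b, unsigned seconds)
{
	(void)b;
	printf("flush scheduled in %us\n", seconds);
}

void leaf_dirty(struct leaf *b)
{
	if (!b->dirty)
		schedule_flush_in(b, 30);   /* first dirtying insert only */
	b->dirty = true;
}

int main(void)
{
	struct leaf l = { false };

	leaf_dirty(&l);                 /* arms the 30s flush */
	leaf_dirty(&l);                 /* already dirty: timer left alone */
	return 0;
}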
522 static void mca_data_free(struct btree *b) in mca_data_free() argument
524 BUG_ON(b->io_mutex.count != 1); in mca_data_free()
526 bch_btree_keys_free(&b->keys); in mca_data_free()
528 b->c->btree_cache_used--; in mca_data_free()
529 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
532 static void mca_bucket_free(struct btree *b) in mca_bucket_free() argument
534 BUG_ON(btree_node_dirty(b)); in mca_bucket_free()
536 b->key.ptr[0] = 0; in mca_bucket_free()
537 hlist_del_init_rcu(&b->hash); in mca_bucket_free()
538 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
546 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) in mca_data_alloc() argument
548 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
550 ilog2(b->c->btree_pages), in mca_data_alloc()
553 b->c->btree_cache_used++; in mca_data_alloc()
554 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
556 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
567 struct btree *b = kzalloc(sizeof(struct btree), gfp); in mca_bucket_alloc() local
569 if (!b) in mca_bucket_alloc()
572 init_rwsem(&b->lock); in mca_bucket_alloc()
573 lockdep_set_novalidate_class(&b->lock); in mca_bucket_alloc()
574 mutex_init(&b->write_lock); in mca_bucket_alloc()
575 lockdep_set_novalidate_class(&b->write_lock); in mca_bucket_alloc()
576 INIT_LIST_HEAD(&b->list); in mca_bucket_alloc()
577 INIT_DELAYED_WORK(&b->work, btree_node_write_work); in mca_bucket_alloc()
578 b->c = c; in mca_bucket_alloc()
579 sema_init(&b->io_mutex, 1); in mca_bucket_alloc()
581 mca_data_alloc(b, k, gfp); in mca_bucket_alloc()
582 return b; in mca_bucket_alloc()
585 static int mca_reap(struct btree *b, unsigned int min_order, bool flush) in mca_reap() argument
590 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
592 if (!down_write_trylock(&b->lock)) in mca_reap()
595 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
597 if (b->keys.page_order < min_order) in mca_reap()
601 if (btree_node_dirty(b)) in mca_reap()
604 if (down_trylock(&b->io_mutex)) in mca_reap()
606 up(&b->io_mutex); in mca_reap()
615 mutex_lock(&b->write_lock); in mca_reap()
621 if (btree_node_journal_flush(b)) { in mca_reap()
622 pr_debug("bnode %p is flushing by journal, retry\n", b); in mca_reap()
623 mutex_unlock(&b->write_lock); in mca_reap()
628 if (btree_node_dirty(b)) in mca_reap()
629 __bch_btree_node_write(b, &cl); in mca_reap()
630 mutex_unlock(&b->write_lock); in mca_reap()
635 down(&b->io_mutex); in mca_reap()
636 up(&b->io_mutex); in mca_reap()
640 rw_unlock(true, b); in mca_reap()
648 struct btree *b, *t; in bch_mca_scan() local
679 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
683 if (!mca_reap(b, 0, false)) { in bch_mca_scan()
684 mca_data_free(b); in bch_mca_scan()
685 rw_unlock(true, b); in bch_mca_scan()
692 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { in bch_mca_scan()
696 if (!mca_reap(b, 0, false)) { in bch_mca_scan()
697 mca_bucket_free(b); in bch_mca_scan()
698 mca_data_free(b); in bch_mca_scan()
699 rw_unlock(true, b); in bch_mca_scan()
727 struct btree *b; in bch_btree_cache_free() local
748 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
755 if (btree_node_dirty(b)) { in bch_btree_cache_free()
756 btree_complete_write(b, btree_current_write(b)); in bch_btree_cache_free()
757 clear_bit(BTREE_NODE_dirty, &b->flags); in bch_btree_cache_free()
759 mca_data_free(b); in bch_btree_cache_free()
763 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
765 list_del(&b->list); in bch_btree_cache_free()
766 cancel_delayed_work_sync(&b->work); in bch_btree_cache_free()
767 kfree(b); in bch_btree_cache_free()
829 struct btree *b; in mca_find() local
832 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
833 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
835 b = NULL; in mca_find()
838 return b; in mca_find()
861 struct btree *b; in mca_cannibalize() local
868 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
869 if (!mca_reap(b, btree_order(k), false)) in mca_cannibalize()
870 return b; in mca_cannibalize()
872 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
873 if (!mca_reap(b, btree_order(k), true)) in mca_cannibalize()
874 return b; in mca_cannibalize()
899 struct btree *b; in mca_alloc() local
911 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
912 if (!mca_reap(b, btree_order(k), false)) in mca_alloc()
918 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
919 if (!mca_reap(b, 0, false)) { in mca_alloc()
920 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
921 if (!b->keys.set[0].data) in mca_alloc()
927 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
928 if (!b) in mca_alloc()
931 BUG_ON(!down_write_trylock(&b->lock)); in mca_alloc()
932 if (!b->keys.set->data) in mca_alloc()
935 BUG_ON(b->io_mutex.count != 1); in mca_alloc()
937 bkey_copy(&b->key, k); in mca_alloc()
938 list_move(&b->list, &c->btree_cache); in mca_alloc()
939 hlist_del_init_rcu(&b->hash); in mca_alloc()
940 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
942 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); in mca_alloc()
943 b->parent = (void *) ~0UL; in mca_alloc()
944 b->flags = 0; in mca_alloc()
945 b->written = 0; in mca_alloc()
946 b->level = level; in mca_alloc()
948 if (!b->level) in mca_alloc()
949 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
950 &b->c->expensive_debug_checks); in mca_alloc()
952 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
953 &b->c->expensive_debug_checks); in mca_alloc()
955 return b; in mca_alloc()
957 if (b) in mca_alloc()
958 rw_unlock(true, b); in mca_alloc()
960 b = mca_cannibalize(c, op, k); in mca_alloc()
961 if (!IS_ERR(b)) in mca_alloc()
964 return b; in mca_alloc()
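
mca_alloc() above tries, in order: a node from btree_cache_freeable that still has its data buffers, a struct from btree_cache_freed whose buffers must be reallocated, a freshly allocated node, and finally cannibalizing a node that is still in use. A compressed user-space sketch of that fallback order is below; the list and allocation helpers are hypothetical, and the locking, hashing and error paths shown in the listing are left out.

/* Sketch of the node-cache allocation order used by mca_alloc():
 * freeable (buffers intact) -> freed (struct only) -> fresh alloc ->
 * cannibalize a live node. Hypothetical helpers, no locking shown. */
#include <stdlib.h>

struct node {
	struct node *next;
	void *data;                     /* key/buffer storage, may be NULL */
};

struct cache {
	struct node *freeable;          /* unused nodes with buffers */
	struct node *freed;             /* unused nodes without buffers */
	struct node *live;              /* nodes currently holding data */
};

static struct node *pop(struct node **list)
{
	struct node *n = *list;

	if (n)
		*list = n->next;
	return n;
}

static int alloc_data(struct node *n)
{
	n->data = malloc(4096);         /* stand-in for bch_btree_keys_alloc() */
	return n->data ? 0 : -1;
}

struct node *node_cache_alloc(struct cache *c)
{
	struct node *n;

	n = pop(&c->freeable);          /* 1: ready to reuse as-is */
	if (n)
		return n;

	n = pop(&c->freed);             /* 2: reuse struct, new buffers */
	if (n) {
		if (!alloc_data(n))
			return n;
		n->next = c->freed;         /* allocation failed: put it back */
		c->freed = n;
	}

	n = calloc(1, sizeof(*n));      /* 3: allocate from scratch */
	if (n) {
		if (!alloc_data(n))
			return n;
		free(n);
	}

	return pop(&c->live);           /* 4: "cannibalize" a live node */
}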
981 struct btree *b; in bch_btree_node_get() local
985 b = mca_find(c, k); in bch_btree_node_get()
987 if (!b) { in bch_btree_node_get()
992 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
995 if (!b) in bch_btree_node_get()
997 if (IS_ERR(b)) in bch_btree_node_get()
998 return b; in bch_btree_node_get()
1000 bch_btree_node_read(b); in bch_btree_node_get()
1003 downgrade_write(&b->lock); in bch_btree_node_get()
1005 rw_lock(write, b, level); in bch_btree_node_get()
1006 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1007 rw_unlock(write, b); in bch_btree_node_get()
1010 BUG_ON(b->level != level); in bch_btree_node_get()
1013 if (btree_node_io_error(b)) { in bch_btree_node_get()
1014 rw_unlock(write, b); in bch_btree_node_get()
1018 BUG_ON(!b->written); in bch_btree_node_get()
1020 b->parent = parent; in bch_btree_node_get()
1022 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1023 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1024 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1027 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1028 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1030 return b; in bch_btree_node_get()
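
bch_btree_node_get() above relies on a common pattern: look the node up without the lock (mca_find() under RCU), take the rwsem at the right level, then re-check that the node still maps to the same on-disk bucket (the PTR_HASH comparison) and retry from the top if it was recycled in the meantime. A generic, self-contained sketch of that check-after-lock loop follows; the single-slot cache and all helper names here are made up for illustration.

/* The lookup / lock / revalidate loop from bch_btree_node_get(), in
 * miniature: if the node was recycled for a different bucket while we
 * waited on the lock, drop it and retry. Everything here is a stub. */
#include <stdint.h>
#include <stddef.h>

struct node {
	uint64_t bucket;                /* which on-disk bucket this caches */
	int locked;
};

static struct node slot = { .bucket = 7 };   /* single-slot stand-in cache */

static struct node *cache_lookup(uint64_t bucket)
{
	return slot.bucket == bucket ? &slot : NULL;
}

static struct node *cache_alloc_and_read(uint64_t bucket)
{
	slot.bucket = bucket;           /* pretend we read it from disk */
	return &slot;
}

static void node_lock(struct node *n)   { n->locked = 1; }
static void node_unlock(struct node *n) { n->locked = 0; }

struct node *node_get(uint64_t bucket)
{
	for (;;) {
		struct node *n = cache_lookup(bucket);   /* lockless lookup */

		if (!n)
			return cache_alloc_and_read(bucket);

		node_lock(n);
		if (n->bucket == bucket)    /* still caching our bucket? */
			return n;               /* yes: hand it back locked */

		node_unlock(n);             /* no: reused for another bucket */
	}
}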
1035 struct btree *b; in btree_node_prefetch() local
1038 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1041 if (!IS_ERR_OR_NULL(b)) { in btree_node_prefetch()
1042 b->parent = parent; in btree_node_prefetch()
1043 bch_btree_node_read(b); in btree_node_prefetch()
1044 rw_unlock(true, b); in btree_node_prefetch()
1050 static void btree_node_free(struct btree *b) in btree_node_free() argument
1052 trace_bcache_btree_node_free(b); in btree_node_free()
1054 BUG_ON(b == b->c->root); in btree_node_free()
1057 mutex_lock(&b->write_lock); in btree_node_free()
1064 if (btree_node_journal_flush(b)) { in btree_node_free()
1065 mutex_unlock(&b->write_lock); in btree_node_free()
1066 pr_debug("bnode %p journal_flush set, retry\n", b); in btree_node_free()
1071 if (btree_node_dirty(b)) { in btree_node_free()
1072 btree_complete_write(b, btree_current_write(b)); in btree_node_free()
1073 clear_bit(BTREE_NODE_dirty, &b->flags); in btree_node_free()
1076 mutex_unlock(&b->write_lock); in btree_node_free()
1078 cancel_delayed_work(&b->work); in btree_node_free()
1080 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1081 bch_bucket_free(b->c, &b->key); in btree_node_free()
1082 mca_bucket_free(b); in btree_node_free()
1083 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1091 struct btree *b = ERR_PTR(-EAGAIN); in __bch_btree_node_alloc() local
1101 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1102 if (IS_ERR(b)) in __bch_btree_node_alloc()
1105 if (!b) { in __bch_btree_node_alloc()
1111 b->parent = parent; in __bch_btree_node_alloc()
1112 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1116 trace_bcache_btree_node_alloc(b); in __bch_btree_node_alloc()
1117 return b; in __bch_btree_node_alloc()
1124 return b; in __bch_btree_node_alloc()
1134 static struct btree *btree_node_alloc_replacement(struct btree *b, in btree_node_alloc_replacement() argument
1137 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1141 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1142 bkey_copy_key(&n->key, &b->key); in btree_node_alloc_replacement()
1149 static void make_btree_freeing_key(struct btree *b, struct bkey *k) in make_btree_freeing_key() argument
1153 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1155 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1157 bkey_copy(k, &b->key); in make_btree_freeing_key()
1162 bch_inc_gen(PTR_CACHE(b->c, &b->key, i), in make_btree_freeing_key()
1163 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1165 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1168 static int btree_check_reserve(struct btree *b, struct btree_op *op) in btree_check_reserve() argument
1170 struct cache_set *c = b->c; in btree_check_reserve()
1172 unsigned int reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1186 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
1243 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) argument
1252 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key() local
1254 b->gen = PTR_GEN(k, i); in bch_initial_mark_key()
1257 b->prio = BTREE_PRIO; in bch_initial_mark_key()
1258 else if (!level && b->prio == BTREE_PRIO) in bch_initial_mark_key()
1259 b->prio = INITIAL_PRIO; in bch_initial_mark_key()
1270 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) in btree_gc_mark_node() argument
1280 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1281 stale = max(stale, btree_mark_key(b, k)); in btree_gc_mark_node()
1284 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1294 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1296 bset_written(&b->keys, t) && in btree_gc_mark_node()
1297 bkey_cmp(&b->key, &t->end) < 0, in btree_gc_mark_node()
1298 b, "found short btree key in gc"); in btree_gc_mark_node()
1300 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1315 struct btree *b; member
1319 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1324 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, in btree_gc_coalesce() argument
1335 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1341 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) in btree_gc_coalesce()
1344 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1347 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1348 block_bytes(b->c->cache)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1352 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); in btree_gc_coalesce()
1363 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1382 block_bytes(b->c->cache)) > blocks) in btree_gc_coalesce()
1398 block_bytes(b->c->cache)) > in btree_gc_coalesce()
1404 last = &r->b->key; in btree_gc_coalesce()
1407 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1447 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) in btree_gc_coalesce()
1450 make_btree_freeing_key(r[i].b, keylist.top); in btree_gc_coalesce()
1454 bch_btree_insert_node(b, op, &keylist, NULL, NULL); in btree_gc_coalesce()
1458 btree_node_free(r[i].b); in btree_gc_coalesce()
1459 rw_unlock(true, r[i].b); in btree_gc_coalesce()
1461 r[i].b = new_nodes[i]; in btree_gc_coalesce()
1465 r[nodes - 1].b = ERR_PTR(-EINTR); in btree_gc_coalesce()
1484 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
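
btree_gc_coalesce() above merges up to GC_MERGE_NODES adjacent, mostly-empty nodes during garbage collection, but only when the combined keys fit into one fewer node, sized at roughly 2/3 of a full node to leave slack. A toy version of that feasibility check is sketched below; the per-block key count and the sample numbers are made up, and the actual key redistribution and freeing-key insertion shown in the listing are not modelled.

/* Toy version of the coalesce feasibility check in btree_gc_coalesce():
 * merging N nodes is only worthwhile if their combined keys fit into
 * N - 1 nodes sized at ~2/3 of a full node. Numbers are made up. */
#include <stdbool.h>
#include <stdio.h>

#define NODE_BLOCKS 8u

static unsigned blocks_for_keys(unsigned keys)
{
	return (keys + 63) / 64;        /* pretend each block holds 64 keys */
}

bool can_coalesce(const unsigned keys_per_node[], unsigned nodes)
{
	unsigned target = NODE_BLOCKS * 2 / 3;   /* leave slack per node */
	unsigned total = 0;

	for (unsigned i = 0; i < nodes; i++)
		total += keys_per_node[i];

	return nodes > 1 && blocks_for_keys(total) <= target * (nodes - 1);
}

int main(void)
{
	unsigned sparse[] = { 40, 30, 25, 20 };      /* nearly empty nodes */
	unsigned dense[]  = { 400, 380, 390, 410 };  /* well-filled nodes */

	printf("%d %d\n", can_coalesce(sparse, 4), can_coalesce(dense, 4));
	return 0;
}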
1495 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, in btree_gc_rewrite_node() argument
1501 if (btree_check_reserve(b, NULL)) in btree_gc_rewrite_node()
1507 if (btree_check_reserve(b, NULL)) { in btree_gc_rewrite_node()
1521 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1531 static unsigned int btree_gc_count_keys(struct btree *b) in btree_gc_count_keys() argument
1537 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1569 static int btree_gc_recurse(struct btree *b, struct btree_op *op, in btree_gc_recurse() argument
1579 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1582 i->b = ERR_PTR(-EINTR); in btree_gc_recurse()
1585 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1587 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1588 true, b); in btree_gc_recurse()
1589 if (IS_ERR(r->b)) { in btree_gc_recurse()
1590 ret = PTR_ERR(r->b); in btree_gc_recurse()
1594 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1596 ret = btree_gc_coalesce(b, op, gc, r); in btree_gc_recurse()
1601 if (!last->b) in btree_gc_recurse()
1604 if (!IS_ERR(last->b)) { in btree_gc_recurse()
1605 should_rewrite = btree_gc_mark_node(last->b, gc); in btree_gc_recurse()
1607 ret = btree_gc_rewrite_node(b, op, last->b); in btree_gc_recurse()
1612 if (last->b->level) { in btree_gc_recurse()
1613 ret = btree_gc_recurse(last->b, op, writes, gc); in btree_gc_recurse()
1618 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1624 mutex_lock(&last->b->write_lock); in btree_gc_recurse()
1625 if (btree_node_dirty(last->b)) in btree_gc_recurse()
1626 bch_btree_node_write(last->b, writes); in btree_gc_recurse()
1627 mutex_unlock(&last->b->write_lock); in btree_gc_recurse()
1628 rw_unlock(true, last->b); in btree_gc_recurse()
1632 r->b = NULL; in btree_gc_recurse()
1634 if (atomic_read(&b->c->search_inflight) && in btree_gc_recurse()
1635 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { in btree_gc_recurse()
1648 if (!IS_ERR_OR_NULL(i->b)) { in btree_gc_recurse()
1649 mutex_lock(&i->b->write_lock); in btree_gc_recurse()
1650 if (btree_node_dirty(i->b)) in btree_gc_recurse()
1651 bch_btree_node_write(i->b, writes); in btree_gc_recurse()
1652 mutex_unlock(&i->b->write_lock); in btree_gc_recurse()
1653 rw_unlock(true, i->b); in btree_gc_recurse()
1659 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, in bch_btree_gc_root() argument
1666 should_rewrite = btree_gc_mark_node(b, gc); in bch_btree_gc_root()
1668 n = btree_node_alloc_replacement(b, NULL); in bch_btree_gc_root()
1674 btree_node_free(b); in bch_btree_gc_root()
1681 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1683 if (b->level) { in bch_btree_gc_root()
1684 ret = btree_gc_recurse(b, op, writes, gc); in bch_btree_gc_root()
1689 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1697 struct bucket *b; in btree_gc_start() local
1708 for_each_bucket(b, ca) { in btree_gc_start()
1709 b->last_gc = b->gen; in btree_gc_start()
1710 if (!atomic_read(&b->pin)) { in btree_gc_start()
1711 SET_GC_MARK(b, 0); in btree_gc_start()
1712 SET_GC_SECTORS_USED(b, 0); in btree_gc_start()
1721 struct bucket *b; in bch_btree_gc_finish() local
1769 for_each_bucket(b, ca) { in bch_btree_gc_finish()
1770 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1772 if (atomic_read(&b->pin)) in bch_btree_gc_finish()
1775 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); in bch_btree_gc_finish()
1777 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) in bch_btree_gc_finish()
1871 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) in bch_btree_check_recurse() argument
1877 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1878 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1880 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1882 if (b->level) { in bch_btree_check_recurse()
1883 bch_btree_iter_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1886 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1889 btree_node_prefetch(b, k); in bch_btree_check_recurse()
1894 b->c->gc_stats.nodes++; in bch_btree_check_recurse()
1898 ret = bcache_btree(check_recurse, p, b, op); in bch_btree_check_recurse()
2080 struct bucket *b; in bch_initial_gc_finish() local
2095 for_each_bucket(b, ca) { in bch_initial_gc_finish()
2100 if (bch_can_invalidate_bucket(ca, b) && in bch_initial_gc_finish()
2101 !GC_MARK(b)) { in bch_initial_gc_finish()
2102 __bch_invalidate_one_bucket(ca, b); in bch_initial_gc_finish()
2104 b - ca->buckets)) in bch_initial_gc_finish()
2106 b - ca->buckets); in bch_initial_gc_finish()
2115 static bool btree_insert_key(struct btree *b, struct bkey *k, in btree_insert_key() argument
2120 BUG_ON(bkey_cmp(k, &b->key) > 0); in btree_insert_key()
2122 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2124 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
2127 trace_bcache_btree_insert_key(b, k, replace_key != NULL, in btree_insert_key()
2134 static size_t insert_u64s_remaining(struct btree *b) in insert_u64s_remaining() argument
2136 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
2141 if (b->keys.ops->is_extents) in insert_u64s_remaining()
2147 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, in bch_btree_insert_keys() argument
2152 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
2157 if (bkey_u64s(k) > insert_u64s_remaining(b)) in bch_btree_insert_keys()
2160 if (bkey_cmp(k, &b->key) <= 0) { in bch_btree_insert_keys()
2161 if (!b->level) in bch_btree_insert_keys()
2162 bkey_put(b->c, k); in bch_btree_insert_keys()
2164 ret |= btree_insert_key(b, k, replace_key); in bch_btree_insert_keys()
2166 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { in bch_btree_insert_keys()
2170 bch_cut_back(&b->key, &temp.key); in bch_btree_insert_keys()
2171 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2173 ret |= btree_insert_key(b, &temp.key, replace_key); in bch_btree_insert_keys()
2183 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); in bch_btree_insert_keys()
2185 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
2189 static int btree_split(struct btree *b, struct btree_op *op, in btree_split() argument
2202 if (btree_check_reserve(b, op)) { in btree_split()
2203 if (!b->level) in btree_split()
2209 n1 = btree_node_alloc_replacement(b, op); in btree_split()
2214 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5; in btree_split()
2219 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2221 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
2225 if (!b->parent) { in btree_split()
2226 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2256 bkey_copy_key(&n2->key, &b->key); in btree_split()
2263 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2284 } else if (!b->parent) { in btree_split()
2291 make_btree_freeing_key(b, parent_keys.top); in btree_split()
2294 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); in btree_split()
2298 btree_node_free(b); in btree_split()
2301 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2305 bkey_put(b->c, &n2->key); in btree_split()
2309 bkey_put(b->c, &n1->key); in btree_split()
2313 WARN(1, "bcache: btree split failed (level %u)", b->level); in btree_split()
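
btree_split() above first builds a compacted replacement of the node, then decides what to do with it: if the result is still more than about 4/5 of a node, the keys are split across two new nodes, and if the node being split was the root, a third node is allocated one level up to become the new root. A small sketch of just that decision, with made-up sizes, follows; the key redistribution, parent insertion and error unwinding in the listing are not shown.

/* Sketch of the split decision in btree_split(): compact first, split
 * only if the compacted result is still > 4/5 of a node, and grow a new
 * root when the node being split has no parent. Sizes are made up. */
#include <stdbool.h>
#include <stdio.h>

#define NODE_BLOCKS 8u

enum split_action { COMPACT_IN_PLACE, SPLIT_IN_TWO, SPLIT_AND_GROW_ROOT };

enum split_action plan_split(unsigned compacted_blocks, bool is_root)
{
	bool split = compacted_blocks > (NODE_BLOCKS * 4) / 5;

	if (!split)
		return COMPACT_IN_PLACE;      /* replacement node is enough */
	if (is_root)
		return SPLIT_AND_GROW_ROOT;   /* need a new root one level up */
	return SPLIT_IN_TWO;              /* parent absorbs the new key */
}

int main(void)
{
	printf("%d %d %d\n",
	       plan_split(5, false),      /* fits after compaction */
	       plan_split(7, false),      /* split, parent exists */
	       plan_split(7, true));      /* split the root, tree grows */
	return 0;
}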
2323 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, in bch_btree_insert_node() argument
2330 BUG_ON(b->level && replace_key); in bch_btree_insert_node()
2334 mutex_lock(&b->write_lock); in bch_btree_insert_node()
2336 if (write_block(b) != btree_bset_last(b) && in bch_btree_insert_node()
2337 b->keys.last_set_unwritten) in bch_btree_insert_node()
2338 bch_btree_init_next(b); /* just wrote a set */ in bch_btree_insert_node()
2340 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { in bch_btree_insert_node()
2341 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2345 BUG_ON(write_block(b) != btree_bset_last(b)); in bch_btree_insert_node()
2347 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { in bch_btree_insert_node()
2348 if (!b->level) in bch_btree_insert_node()
2349 bch_btree_leaf_dirty(b, journal_ref); in bch_btree_insert_node()
2351 bch_btree_node_write(b, &cl); in bch_btree_insert_node()
2354 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2362 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2364 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2365 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2369 int ret = btree_split(b, op, insert_keys, replace_key); in bch_btree_insert_node()
2379 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, in bch_btree_insert_check_key() argument
2383 uint64_t btree_ptr = b->key.ptr[0]; in bch_btree_insert_check_key()
2384 unsigned long seq = b->seq; in bch_btree_insert_check_key()
2391 rw_unlock(false, b); in bch_btree_insert_check_key()
2392 rw_lock(true, b, b->level); in bch_btree_insert_check_key()
2394 if (b->key.ptr[0] != btree_ptr || in bch_btree_insert_check_key()
2395 b->seq != seq + 1) { in bch_btree_insert_check_key()
2396 op->lock = b->level; in bch_btree_insert_check_key()
2408 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); in bch_btree_insert_check_key()
2413 downgrade_write(&b->lock); in bch_btree_insert_check_key()
2424 static int btree_insert_fn(struct btree_op *b_op, struct btree *b) in btree_insert_fn() argument
2429 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2471 void bch_btree_set_root(struct btree *b) in bch_btree_set_root() argument
2478 trace_bcache_btree_set_root(b); in bch_btree_set_root()
2480 BUG_ON(!b->written); in bch_btree_set_root()
2482 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_btree_set_root()
2483 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2485 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2486 list_del_init(&b->list); in bch_btree_set_root()
2487 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2489 b->c->root = b; in bch_btree_set_root()
2491 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2497 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_nodes_recurse() argument
2503 if (b->level) { in bch_btree_map_nodes_recurse()
2507 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2509 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2511 ret = bcache_btree(map_nodes_recurse, k, b, in bch_btree_map_nodes_recurse()
2520 if (!b->level || flags == MAP_ALL_NODES) in bch_btree_map_nodes_recurse()
2521 ret = fn(op, b); in bch_btree_map_nodes_recurse()
2532 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_keys_recurse() argument
2540 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2542 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2543 ret = !b->level in bch_btree_map_keys_recurse()
2544 ? fn(op, b, k) in bch_btree_map_keys_recurse()
2546 b, op, from, fn, flags); in bch_btree_map_keys_recurse()
2553 if (!b->level && (flags & MAP_END_KEY)) in bch_btree_map_keys_recurse()
2554 ret = fn(op, b, &KEY(KEY_INODE(&b->key), in bch_btree_map_keys_recurse()
2555 KEY_OFFSET(&b->key), 0)); in bch_btree_map_keys_recurse()
2592 static int refill_keybuf_fn(struct btree_op *op, struct btree *b, in refill_keybuf_fn() argument