Lines matching refs: b
102 #define insert_lock(s, b) ((b)->level <= (s)->lock) argument
122 #define btree(fn, key, b, op, ...) \ argument
124 int _r, l = (b)->level - 1; \
126 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
127 _w, b); \
163 static inline struct bset *write_block(struct btree *b) in write_block() argument
165 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); in write_block()
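For context, the listed line 165 resolves write_block() to the first unwritten block of the node's buffer: the start of the first bset plus the blocks already written. A minimal userspace sketch of that arithmetic, assuming 4 KiB blocks; the model_* names are illustrative, not kernel identifiers.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the node buffer bookkeeping: the data area starts at
 * 'first_bset' and 'written' blocks of it are already on disk. */
static void *model_write_block(void *first_bset, unsigned int written,
                               unsigned int block_bytes)
{
        /* Same shape as the listed line 165: base + written blocks, in bytes. */
        return (char *)first_bset + (size_t)written * block_bytes;
}

int main(void)
{
        char buf[4096 * 8];     /* pretend 8-block node buffer */

        /* With 4 KiB blocks and 3 blocks written, the next bset would
         * start 12288 bytes into the buffer. */
        printf("%td\n", (char *)model_write_block(buf, 3, 4096) - buf);
        return 0;
}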
168 static void bch_btree_init_next(struct btree *b) in bch_btree_init_next() argument
171 if (b->level && b->keys.nsets) in bch_btree_init_next()
172 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
174 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
176 if (b->written < btree_blocks(b)) in bch_btree_init_next()
177 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
178 bset_magic(&b->c->sb)); in bch_btree_init_next()
195 static uint64_t btree_csum_set(struct btree *b, struct bset *i) in btree_csum_set() argument
197 uint64_t crc = b->key.ptr[0]; in btree_csum_set()
204 void bch_btree_node_read_done(struct btree *b) in bch_btree_node_read_done() argument
207 struct bset *i = btree_bset_first(b); in bch_btree_node_read_done()
210 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
211 iter->size = b->c->sb.bucket_size / b->c->sb.block_size; in bch_btree_node_read_done()
215 iter->b = &b->keys; in bch_btree_node_read_done()
222 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
223 i = write_block(b)) { in bch_btree_node_read_done()
229 if (b->written + set_blocks(i, block_bytes(b->c)) > in bch_btree_node_read_done()
230 btree_blocks(b)) in bch_btree_node_read_done()
234 if (i->magic != bset_magic(&b->c->sb)) in bch_btree_node_read_done()
244 if (i->csum != btree_csum_set(b, i)) in bch_btree_node_read_done()
250 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
255 b->written += set_blocks(i, block_bytes(b->c)); in bch_btree_node_read_done()
259 for (i = write_block(b); in bch_btree_node_read_done()
260 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
261 i = ((void *) i) + block_bytes(b->c)) in bch_btree_node_read_done()
262 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
265 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
267 i = b->keys.set[0].data; in bch_btree_node_read_done()
269 if (b->keys.set[0].size && in bch_btree_node_read_done()
270 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
273 if (b->written < btree_blocks(b)) in bch_btree_node_read_done()
274 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
275 bset_magic(&b->c->sb)); in bch_btree_node_read_done()
277 mempool_free(iter, &b->c->fill_iter); in bch_btree_node_read_done()
280 set_btree_node_io_error(b); in bch_btree_node_read_done()
281 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
282 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
283 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
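The validation loop listed at lines 222-255 advances b->written by set_blocks() for each bset it accepts. A hedged sketch of that accounting, assuming a set occupies its header plus i->keys 64-bit words rounded up to whole blocks; the struct layout and model_* names below are simplified stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for struct bset: a fixed header followed by
 * 'keys' 64-bit words of key data. */
struct model_bset {
        uint64_t csum, magic, seq;
        uint32_t version, keys;
};

static size_t model_set_blocks(const struct model_bset *i, size_t block_bytes)
{
        size_t bytes = sizeof(*i) + (size_t)i->keys * sizeof(uint64_t);

        return (bytes + block_bytes - 1) / block_bytes;  /* round up */
}

int main(void)
{
        struct model_bset i = { .keys = 1000 };
        size_t written = 0;

        /* Each validated set bumps the written-block count, as at the
         * listed line 255. */
        written += model_set_blocks(&i, 4096);
        printf("written=%zu blocks\n", written);
        return 0;
}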
294 static void bch_btree_node_read(struct btree *b) in bch_btree_node_read() argument
300 trace_bcache_btree_read(b); in bch_btree_node_read()
304 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
305 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
310 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
312 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
316 set_btree_node_io_error(b); in bch_btree_node_read()
318 bch_bbio_free(bio, b->c); in bch_btree_node_read()
320 if (btree_node_io_error(b)) in bch_btree_node_read()
323 bch_btree_node_read_done(b); in bch_btree_node_read()
324 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
328 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
329 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
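The listed line 305 sizes the read bio as KEY_SIZE(&b->key) << 9, i.e. the key's size field counts 512-byte sectors. A trivial sketch of that conversion (illustrative only):

#include <stdio.h>
#include <stdint.h>

/* KEY_SIZE() is in 512-byte sectors; shifting left by 9 gives bytes. */
static uint64_t model_sectors_to_bytes(uint64_t sectors)
{
        return sectors << 9;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)model_sectors_to_bytes(16)); /* 8192 */
        return 0;
}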
332 static void btree_complete_write(struct btree *b, struct btree_write *w) in btree_complete_write() argument
335 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
336 wake_up_allocators(b->c); in btree_complete_write()
340 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
349 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_unlock() local
351 up(&b->io_mutex); in btree_node_write_unlock()
356 struct btree *b = container_of(cl, struct btree, io); in __btree_node_write_done() local
357 struct btree_write *w = btree_prev_write(b); in __btree_node_write_done()
359 bch_bbio_free(b->bio, b->c); in __btree_node_write_done()
360 b->bio = NULL; in __btree_node_write_done()
361 btree_complete_write(b, w); in __btree_node_write_done()
363 if (btree_node_dirty(b)) in __btree_node_write_done()
364 schedule_delayed_work(&b->work, 30 * HZ); in __btree_node_write_done()
371 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_done() local
373 bio_free_pages(b->bio); in btree_node_write_done()
380 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_endio() local
383 set_btree_node_io_error(b); in btree_node_write_endio()
385 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); in btree_node_write_endio()
389 static void do_btree_node_write(struct btree *b) in do_btree_node_write() argument
391 struct closure *cl = &b->io; in do_btree_node_write()
392 struct bset *i = btree_bset_last(b); in do_btree_node_write()
396 i->csum = btree_csum_set(b, i); in do_btree_node_write()
398 BUG_ON(b->bio); in do_btree_node_write()
399 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
401 b->bio->bi_end_io = btree_node_write_endio; in do_btree_node_write()
402 b->bio->bi_private = cl; in do_btree_node_write()
403 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); in do_btree_node_write()
404 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; in do_btree_node_write()
405 bch_bio_map(b->bio, i); in do_btree_node_write()
422 bkey_copy(&k.key, &b->key); in do_btree_node_write()
424 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
426 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { in do_btree_node_write()
431 bio_for_each_segment_all(bv, b->bio, j) in do_btree_node_write()
435 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
443 b->bio->bi_vcnt = 0; in do_btree_node_write()
444 bch_bio_map(b->bio, i); in do_btree_node_write()
446 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
453 void __bch_btree_node_write(struct btree *b, struct closure *parent) in __bch_btree_node_write() argument
455 struct bset *i = btree_bset_last(b); in __bch_btree_node_write()
457 lockdep_assert_held(&b->write_lock); in __bch_btree_node_write()
459 trace_bcache_btree_write(b); in __bch_btree_node_write()
462 BUG_ON(b->written >= btree_blocks(b)); in __bch_btree_node_write()
463 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
464 BUG_ON(btree_bset_first(b)->seq != i->seq); in __bch_btree_node_write()
465 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
467 cancel_delayed_work(&b->work); in __bch_btree_node_write()
470 down(&b->io_mutex); in __bch_btree_node_write()
471 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
473 clear_bit(BTREE_NODE_dirty, &b->flags); in __bch_btree_node_write()
474 change_bit(BTREE_NODE_write_idx, &b->flags); in __bch_btree_node_write()
476 do_btree_node_write(b); in __bch_btree_node_write()
478 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, in __bch_btree_node_write()
479 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); in __bch_btree_node_write()
481 b->written += set_blocks(i, block_bytes(b->c)); in __bch_btree_node_write()
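Lines 478-481 above account for a completed set: the cache device's btree_sectors_written grows by the set's blocks times the block size, and b->written advances by the same block count. A small sketch of that bookkeeping, assuming the superblock's block_size is expressed in 512-byte sectors; the model_* names are hypothetical.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct model_node {
        unsigned int written;   /* blocks of this node already on disk */
};

static void model_account_write(struct model_node *b, size_t set_blocks,
                                unsigned int sectors_per_block,
                                uint64_t *btree_sectors_written)
{
        *btree_sectors_written += set_blocks * sectors_per_block; /* line 478 */
        b->written += set_blocks;                                 /* line 481 */
}

int main(void)
{
        struct model_node b = { .written = 2 };
        uint64_t sectors = 0;

        model_account_write(&b, 1, 8, &sectors);  /* one 4 KiB block = 8 sectors */
        printf("written=%u blocks, %llu sectors\n",
               b.written, (unsigned long long)sectors);
        return 0;
}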
484 void bch_btree_node_write(struct btree *b, struct closure *parent) in bch_btree_node_write() argument
486 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
488 lockdep_assert_held(&b->lock); in bch_btree_node_write()
490 __bch_btree_node_write(b, parent); in bch_btree_node_write()
496 if (nsets && !b->keys.nsets) in bch_btree_node_write()
497 bch_btree_verify(b); in bch_btree_node_write()
499 bch_btree_init_next(b); in bch_btree_node_write()
502 static void bch_btree_node_write_sync(struct btree *b) in bch_btree_node_write_sync() argument
508 mutex_lock(&b->write_lock); in bch_btree_node_write_sync()
509 bch_btree_node_write(b, &cl); in bch_btree_node_write_sync()
510 mutex_unlock(&b->write_lock); in bch_btree_node_write_sync()
517 struct btree *b = container_of(to_delayed_work(w), struct btree, work); in btree_node_write_work() local
519 mutex_lock(&b->write_lock); in btree_node_write_work()
520 if (btree_node_dirty(b)) in btree_node_write_work()
521 __bch_btree_node_write(b, NULL); in btree_node_write_work()
522 mutex_unlock(&b->write_lock); in btree_node_write_work()
525 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) in bch_btree_leaf_dirty() argument
527 struct bset *i = btree_bset_last(b); in bch_btree_leaf_dirty()
528 struct btree_write *w = btree_current_write(b); in bch_btree_leaf_dirty()
530 lockdep_assert_held(&b->write_lock); in bch_btree_leaf_dirty()
532 BUG_ON(!b->written); in bch_btree_leaf_dirty()
535 if (!btree_node_dirty(b)) in bch_btree_leaf_dirty()
536 schedule_delayed_work(&b->work, 30 * HZ); in bch_btree_leaf_dirty()
538 set_btree_node_dirty(b); in bch_btree_leaf_dirty()
542 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
556 bch_btree_node_write(b, NULL); in bch_btree_leaf_dirty()
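Lines 535-538 show that only the clean-to-dirty transition schedules the delayed write, 30*HZ (30 seconds) out; re-dirtying an already dirty leaf does not push the flush further back. A toy model of that pattern, deadline arithmetic only, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct model_leaf {
        bool dirty;
        unsigned long flush_deadline;   /* "jiffies" when a flush is due */
};

static void model_leaf_dirty(struct model_leaf *b, unsigned long now,
                             unsigned long hz)
{
        if (!b->dirty)                          /* as at line 535 */
                b->flush_deadline = now + 30 * hz;
        b->dirty = true;                        /* as at line 538 */
}

int main(void)
{
        struct model_leaf b = { 0 };

        model_leaf_dirty(&b, 1000, 100);
        model_leaf_dirty(&b, 2000, 100);        /* deadline unchanged */
        printf("deadline=%lu\n", b.flush_deadline);  /* 4000 */
        return 0;
}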
569 static void mca_data_free(struct btree *b) in mca_data_free() argument
571 BUG_ON(b->io_mutex.count != 1); in mca_data_free()
573 bch_btree_keys_free(&b->keys); in mca_data_free()
575 b->c->btree_cache_used--; in mca_data_free()
576 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
579 static void mca_bucket_free(struct btree *b) in mca_bucket_free() argument
581 BUG_ON(btree_node_dirty(b)); in mca_bucket_free()
583 b->key.ptr[0] = 0; in mca_bucket_free()
584 hlist_del_init_rcu(&b->hash); in mca_bucket_free()
585 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
593 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) in mca_data_alloc() argument
595 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
597 ilog2(b->c->btree_pages), in mca_data_alloc()
600 b->c->btree_cache_used++; in mca_data_alloc()
601 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
603 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
610 struct btree *b = kzalloc(sizeof(struct btree), gfp); in mca_bucket_alloc() local
612 if (!b) in mca_bucket_alloc()
615 init_rwsem(&b->lock); in mca_bucket_alloc()
616 lockdep_set_novalidate_class(&b->lock); in mca_bucket_alloc()
617 mutex_init(&b->write_lock); in mca_bucket_alloc()
618 lockdep_set_novalidate_class(&b->write_lock); in mca_bucket_alloc()
619 INIT_LIST_HEAD(&b->list); in mca_bucket_alloc()
620 INIT_DELAYED_WORK(&b->work, btree_node_write_work); in mca_bucket_alloc()
621 b->c = c; in mca_bucket_alloc()
622 sema_init(&b->io_mutex, 1); in mca_bucket_alloc()
624 mca_data_alloc(b, k, gfp); in mca_bucket_alloc()
625 return b; in mca_bucket_alloc()
628 static int mca_reap(struct btree *b, unsigned int min_order, bool flush) in mca_reap() argument
633 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
635 if (!down_write_trylock(&b->lock)) in mca_reap()
638 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
640 if (b->keys.page_order < min_order) in mca_reap()
644 if (btree_node_dirty(b)) in mca_reap()
647 if (down_trylock(&b->io_mutex)) in mca_reap()
649 up(&b->io_mutex); in mca_reap()
652 mutex_lock(&b->write_lock); in mca_reap()
653 if (btree_node_dirty(b)) in mca_reap()
654 __bch_btree_node_write(b, &cl); in mca_reap()
655 mutex_unlock(&b->write_lock); in mca_reap()
660 down(&b->io_mutex); in mca_reap()
661 up(&b->io_mutex); in mca_reap()
665 rw_unlock(true, b); in mca_reap()
673 struct btree *b, *t; in bch_mca_scan() local
702 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
707 !mca_reap(b, 0, false)) { in bch_mca_scan()
708 mca_data_free(b); in bch_mca_scan()
709 rw_unlock(true, b); in bch_mca_scan()
719 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_mca_scan()
722 if (!b->accessed && in bch_mca_scan()
723 !mca_reap(b, 0, false)) { in bch_mca_scan()
724 mca_bucket_free(b); in bch_mca_scan()
725 mca_data_free(b); in bch_mca_scan()
726 rw_unlock(true, b); in bch_mca_scan()
729 b->accessed = 0; in bch_mca_scan()
752 struct btree *b; in bch_btree_cache_free() local
773 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
775 if (btree_node_dirty(b)) in bch_btree_cache_free()
776 btree_complete_write(b, btree_current_write(b)); in bch_btree_cache_free()
777 clear_bit(BTREE_NODE_dirty, &b->flags); in bch_btree_cache_free()
779 mca_data_free(b); in bch_btree_cache_free()
783 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
785 list_del(&b->list); in bch_btree_cache_free()
786 cancel_delayed_work_sync(&b->work); in bch_btree_cache_free()
787 kfree(b); in bch_btree_cache_free()
840 struct btree *b; in mca_find() local
843 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
844 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
846 b = NULL; in mca_find()
849 return b; in mca_find()
870 struct btree *b; in mca_cannibalize() local
877 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
878 if (!mca_reap(b, btree_order(k), false)) in mca_cannibalize()
879 return b; in mca_cannibalize()
881 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
882 if (!mca_reap(b, btree_order(k), true)) in mca_cannibalize()
883 return b; in mca_cannibalize()
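mca_cannibalize() (lines 877-883) makes two passes over the node cache: first it will only take a node it can reap without flushing, then it retries accepting the cost of a flush. A minimal sketch of that two-pass shape over a linked walk; the reap rule below is a stand-in, not the real mca_reap() conditions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct model_node {
        bool dirty;
        struct model_node *next;        /* walk order used by the sketch */
};

/* Stand-in reap: a clean node can always be taken; a dirty one only when
 * the caller is willing to flush it first. */
static bool model_reap(struct model_node *n, bool flush)
{
        if (n->dirty && !flush)
                return false;
        n->dirty = false;               /* "flushed" */
        return true;
}

static struct model_node *model_cannibalize(struct model_node *head)
{
        struct model_node *n;

        for (n = head; n; n = n->next)  /* pass 1: no flushing */
                if (model_reap(n, false))
                        return n;

        for (n = head; n; n = n->next)  /* pass 2: flush if needed */
                if (model_reap(n, true))
                        return n;

        return NULL;
}

int main(void)
{
        struct model_node a = { .dirty = true, .next = NULL };
        struct model_node b = { .dirty = true, .next = &a };

        printf("%s\n", model_cannibalize(&b) ? "got a node" : "none");
        return 0;
}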
906 struct btree *b; in mca_alloc() local
918 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
919 if (!mca_reap(b, btree_order(k), false)) in mca_alloc()
925 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
926 if (!mca_reap(b, 0, false)) { in mca_alloc()
927 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
928 if (!b->keys.set[0].data) in mca_alloc()
934 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
935 if (!b) in mca_alloc()
938 BUG_ON(!down_write_trylock(&b->lock)); in mca_alloc()
939 if (!b->keys.set->data) in mca_alloc()
942 BUG_ON(b->io_mutex.count != 1); in mca_alloc()
944 bkey_copy(&b->key, k); in mca_alloc()
945 list_move(&b->list, &c->btree_cache); in mca_alloc()
946 hlist_del_init_rcu(&b->hash); in mca_alloc()
947 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
949 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); in mca_alloc()
950 b->parent = (void *) ~0UL; in mca_alloc()
951 b->flags = 0; in mca_alloc()
952 b->written = 0; in mca_alloc()
953 b->level = level; in mca_alloc()
955 if (!b->level) in mca_alloc()
956 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
957 &b->c->expensive_debug_checks); in mca_alloc()
959 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
960 &b->c->expensive_debug_checks); in mca_alloc()
962 return b; in mca_alloc()
964 if (b) in mca_alloc()
965 rw_unlock(true, b); in mca_alloc()
967 b = mca_cannibalize(c, op, k); in mca_alloc()
968 if (!IS_ERR(b)) in mca_alloc()
971 return b; in mca_alloc()
988 struct btree *b; in bch_btree_node_get() local
992 b = mca_find(c, k); in bch_btree_node_get()
994 if (!b) { in bch_btree_node_get()
999 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
1002 if (!b) in bch_btree_node_get()
1004 if (IS_ERR(b)) in bch_btree_node_get()
1005 return b; in bch_btree_node_get()
1007 bch_btree_node_read(b); in bch_btree_node_get()
1010 downgrade_write(&b->lock); in bch_btree_node_get()
1012 rw_lock(write, b, level); in bch_btree_node_get()
1013 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1014 rw_unlock(write, b); in bch_btree_node_get()
1017 BUG_ON(b->level != level); in bch_btree_node_get()
1020 if (btree_node_io_error(b)) { in bch_btree_node_get()
1021 rw_unlock(write, b); in bch_btree_node_get()
1025 BUG_ON(!b->written); in bch_btree_node_get()
1027 b->parent = parent; in bch_btree_node_get()
1028 b->accessed = 1; in bch_btree_node_get()
1030 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1031 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1032 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1035 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1036 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1038 return b; in bch_btree_node_get()
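Lines 1012-1014 show the lock-then-revalidate step in bch_btree_node_get(): the node was found in the hash table without its lock held, so after rw_lock() the code re-checks that the node still points at the requested bucket and unlocks to retry otherwise. A trivial model of that check; the bucket field is a stand-in for PTR_HASH().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_cached_node {
        uint64_t bucket;        /* stands in for PTR_HASH(c, &b->key) */
};

static bool model_still_valid(const struct model_cached_node *b,
                              uint64_t wanted_bucket)
{
        /* Another thread may have reused the node for a different bucket
         * between the lockless lookup and taking the lock. */
        return b->bucket == wanted_bucket;
}

int main(void)
{
        struct model_cached_node b = { .bucket = 42 };

        printf("%s\n", model_still_valid(&b, 42) ? "use it" : "retry");
        return 0;
}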
1043 struct btree *b; in btree_node_prefetch() local
1046 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1049 if (!IS_ERR_OR_NULL(b)) { in btree_node_prefetch()
1050 b->parent = parent; in btree_node_prefetch()
1051 bch_btree_node_read(b); in btree_node_prefetch()
1052 rw_unlock(true, b); in btree_node_prefetch()
1058 static void btree_node_free(struct btree *b) in btree_node_free() argument
1060 trace_bcache_btree_node_free(b); in btree_node_free()
1062 BUG_ON(b == b->c->root); in btree_node_free()
1064 mutex_lock(&b->write_lock); in btree_node_free()
1066 if (btree_node_dirty(b)) in btree_node_free()
1067 btree_complete_write(b, btree_current_write(b)); in btree_node_free()
1068 clear_bit(BTREE_NODE_dirty, &b->flags); in btree_node_free()
1070 mutex_unlock(&b->write_lock); in btree_node_free()
1072 cancel_delayed_work(&b->work); in btree_node_free()
1074 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1075 bch_bucket_free(b->c, &b->key); in btree_node_free()
1076 mca_bucket_free(b); in btree_node_free()
1077 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1085 struct btree *b = ERR_PTR(-EAGAIN); in __bch_btree_node_alloc() local
1095 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1096 if (IS_ERR(b)) in __bch_btree_node_alloc()
1099 if (!b) { in __bch_btree_node_alloc()
1105 b->accessed = 1; in __bch_btree_node_alloc()
1106 b->parent = parent; in __bch_btree_node_alloc()
1107 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); in __bch_btree_node_alloc()
1111 trace_bcache_btree_node_alloc(b); in __bch_btree_node_alloc()
1112 return b; in __bch_btree_node_alloc()
1119 return b; in __bch_btree_node_alloc()
1129 static struct btree *btree_node_alloc_replacement(struct btree *b, in btree_node_alloc_replacement() argument
1132 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1136 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1137 bkey_copy_key(&n->key, &b->key); in btree_node_alloc_replacement()
1144 static void make_btree_freeing_key(struct btree *b, struct bkey *k) in make_btree_freeing_key() argument
1148 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1150 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1152 bkey_copy(k, &b->key); in make_btree_freeing_key()
1157 bch_inc_gen(PTR_CACHE(b->c, &b->key, i), in make_btree_freeing_key()
1158 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1160 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1163 static int btree_check_reserve(struct btree *b, struct btree_op *op) in btree_check_reserve() argument
1165 struct cache_set *c = b->c; in btree_check_reserve()
1167 unsigned int i, reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1182 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
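The listed line 1167 sizes the allocation reserve as (root level - node level) * 2 + 1, presumably two nodes for each level that may split on the way up plus one for a new root. A one-function sketch of that formula (hypothetical name):

#include <stdio.h>

/* Same arithmetic as the listed line 1167. */
static unsigned int model_split_reserve(unsigned int root_level,
                                        unsigned int level)
{
        return (root_level - level) * 2 + 1;
}

int main(void)
{
        /* A leaf split under a level-3 root can need up to 7 new nodes. */
        printf("%u\n", model_split_reserve(3, 0));
        return 0;
}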
1239 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) argument
1248 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key() local
1250 b->gen = PTR_GEN(k, i); in bch_initial_mark_key()
1253 b->prio = BTREE_PRIO; in bch_initial_mark_key()
1254 else if (!level && b->prio == BTREE_PRIO) in bch_initial_mark_key()
1255 b->prio = INITIAL_PRIO; in bch_initial_mark_key()
1266 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) in btree_gc_mark_node() argument
1276 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1277 stale = max(stale, btree_mark_key(b, k)); in btree_gc_mark_node()
1280 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1290 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1292 bset_written(&b->keys, t) && in btree_gc_mark_node()
1293 bkey_cmp(&b->key, &t->end) < 0, in btree_gc_mark_node()
1294 b, "found short btree key in gc"); in btree_gc_mark_node()
1296 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1311 struct btree *b; member
1315 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1320 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, in btree_gc_coalesce() argument
1331 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1337 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) in btree_gc_coalesce()
1340 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1343 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1344 block_bytes(b->c)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1348 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); in btree_gc_coalesce()
1359 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1378 block_bytes(b->c)) > blocks) in btree_gc_coalesce()
1394 block_bytes(b->c)) > in btree_gc_coalesce()
1400 last = &r->b->key; in btree_gc_coalesce()
1403 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > in btree_gc_coalesce()
1443 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) in btree_gc_coalesce()
1446 make_btree_freeing_key(r[i].b, keylist.top); in btree_gc_coalesce()
1450 bch_btree_insert_node(b, op, &keylist, NULL, NULL); in btree_gc_coalesce()
1454 btree_node_free(r[i].b); in btree_gc_coalesce()
1455 rw_unlock(true, r[i].b); in btree_gc_coalesce()
1457 r[i].b = new_nodes[i]; in btree_gc_coalesce()
1461 r[nodes - 1].b = ERR_PTR(-EINTR); in btree_gc_coalesce()
1477 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
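The coalesce gate at lines 1340-1344 only merges a run of neighbouring nodes (up to GC_MERGE_NODES of them, per line 1337) when their combined key blocks fit into one fewer node, each counted at 2/3 of the default node size. A sketch of that threshold check; the model_* names are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the comparison at lines 1340-1344: coalescing 'nodes' neighbours
 * is only attempted if their combined blocks fit in (nodes - 1) nodes,
 * each filled to at most 2/3 of the default size. */
static bool model_worth_coalescing(unsigned int combined_blocks,
                                   unsigned int nodes,
                                   unsigned int default_blocks)
{
        unsigned int target = default_blocks * 2 / 3;

        return combined_blocks <= target * (nodes - 1);
}

int main(void)
{
        /* 128 blocks of keys across 4 nodes vs. 3 * 42 = 126: just over,
         * so this prints 0 (do not coalesce). */
        printf("%d\n", model_worth_coalescing(128, 4, 64));
        return 0;
}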
1487 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, in btree_gc_rewrite_node() argument
1493 if (btree_check_reserve(b, NULL)) in btree_gc_rewrite_node()
1499 if (btree_check_reserve(b, NULL)) { in btree_gc_rewrite_node()
1513 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1523 static unsigned int btree_gc_count_keys(struct btree *b) in btree_gc_count_keys() argument
1529 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1561 static int btree_gc_recurse(struct btree *b, struct btree_op *op, in btree_gc_recurse() argument
1571 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1574 i->b = ERR_PTR(-EINTR); in btree_gc_recurse()
1577 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1579 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1580 true, b); in btree_gc_recurse()
1581 if (IS_ERR(r->b)) { in btree_gc_recurse()
1582 ret = PTR_ERR(r->b); in btree_gc_recurse()
1586 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1588 ret = btree_gc_coalesce(b, op, gc, r); in btree_gc_recurse()
1593 if (!last->b) in btree_gc_recurse()
1596 if (!IS_ERR(last->b)) { in btree_gc_recurse()
1597 should_rewrite = btree_gc_mark_node(last->b, gc); in btree_gc_recurse()
1599 ret = btree_gc_rewrite_node(b, op, last->b); in btree_gc_recurse()
1604 if (last->b->level) { in btree_gc_recurse()
1605 ret = btree_gc_recurse(last->b, op, writes, gc); in btree_gc_recurse()
1610 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1616 mutex_lock(&last->b->write_lock); in btree_gc_recurse()
1617 if (btree_node_dirty(last->b)) in btree_gc_recurse()
1618 bch_btree_node_write(last->b, writes); in btree_gc_recurse()
1619 mutex_unlock(&last->b->write_lock); in btree_gc_recurse()
1620 rw_unlock(true, last->b); in btree_gc_recurse()
1624 r->b = NULL; in btree_gc_recurse()
1626 if (atomic_read(&b->c->search_inflight) && in btree_gc_recurse()
1627 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { in btree_gc_recurse()
1640 if (!IS_ERR_OR_NULL(i->b)) { in btree_gc_recurse()
1641 mutex_lock(&i->b->write_lock); in btree_gc_recurse()
1642 if (btree_node_dirty(i->b)) in btree_gc_recurse()
1643 bch_btree_node_write(i->b, writes); in btree_gc_recurse()
1644 mutex_unlock(&i->b->write_lock); in btree_gc_recurse()
1645 rw_unlock(true, i->b); in btree_gc_recurse()
1651 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, in bch_btree_gc_root() argument
1658 should_rewrite = btree_gc_mark_node(b, gc); in bch_btree_gc_root()
1660 n = btree_node_alloc_replacement(b, NULL); in bch_btree_gc_root()
1666 btree_node_free(b); in bch_btree_gc_root()
1673 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1675 if (b->level) { in bch_btree_gc_root()
1676 ret = btree_gc_recurse(b, op, writes, gc); in bch_btree_gc_root()
1681 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1689 struct bucket *b; in btree_gc_start() local
1701 for_each_bucket(b, ca) { in btree_gc_start()
1702 b->last_gc = b->gen; in btree_gc_start()
1703 if (!atomic_read(&b->pin)) { in btree_gc_start()
1704 SET_GC_MARK(b, 0); in btree_gc_start()
1705 SET_GC_SECTORS_USED(b, 0); in btree_gc_start()
1714 struct bucket *b; in bch_btree_gc_finish() local
1763 for_each_bucket(b, ca) { in bch_btree_gc_finish()
1764 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1766 if (atomic_read(&b->pin)) in bch_btree_gc_finish()
1769 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); in bch_btree_gc_finish()
1771 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) in bch_btree_gc_finish()
1868 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) in bch_btree_check_recurse() argument
1874 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1875 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1877 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1879 if (b->level) { in bch_btree_check_recurse()
1880 bch_btree_iter_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1883 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1886 btree_node_prefetch(b, k); in bch_btree_check_recurse()
1891 b->c->gc_stats.nodes++; in bch_btree_check_recurse()
1895 ret = btree(check_recurse, p, b, op); in bch_btree_check_recurse()
1916 struct bucket *b; in bch_initial_gc_finish() local
1933 for_each_bucket(b, ca) { in bch_initial_gc_finish()
1938 if (bch_can_invalidate_bucket(ca, b) && in bch_initial_gc_finish()
1939 !GC_MARK(b)) { in bch_initial_gc_finish()
1940 __bch_invalidate_one_bucket(ca, b); in bch_initial_gc_finish()
1942 b - ca->buckets)) in bch_initial_gc_finish()
1944 b - ca->buckets); in bch_initial_gc_finish()
1954 static bool btree_insert_key(struct btree *b, struct bkey *k, in btree_insert_key() argument
1959 BUG_ON(bkey_cmp(k, &b->key) > 0); in btree_insert_key()
1961 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
1963 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
1966 trace_bcache_btree_insert_key(b, k, replace_key != NULL, in btree_insert_key()
1973 static size_t insert_u64s_remaining(struct btree *b) in insert_u64s_remaining() argument
1975 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
1980 if (b->keys.ops->is_extents) in insert_u64s_remaining()
1986 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, in bch_btree_insert_keys() argument
1991 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
1996 if (bkey_u64s(k) > insert_u64s_remaining(b)) in bch_btree_insert_keys()
1999 if (bkey_cmp(k, &b->key) <= 0) { in bch_btree_insert_keys()
2000 if (!b->level) in bch_btree_insert_keys()
2001 bkey_put(b->c, k); in bch_btree_insert_keys()
2003 ret |= btree_insert_key(b, k, replace_key); in bch_btree_insert_keys()
2005 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { in bch_btree_insert_keys()
2009 bch_cut_back(&b->key, &temp.key); in bch_btree_insert_keys()
2010 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2012 ret |= btree_insert_key(b, &temp.key, replace_key); in bch_btree_insert_keys()
2022 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); in bch_btree_insert_keys()
2024 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
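Lines 1999-2012 handle an extent key that straddles the node boundary b->key: a copy is cut back so the part up to the boundary is inserted here, and the original is cut front so the remainder stays on the keylist for the next node. A toy model of that clipping arithmetic only; it ignores bcache's actual bkey encoding, and the names are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Toy extent: [start, end) in sectors. */
struct model_extent {
        uint64_t start, end;
};

/* Keep only the part before 'boundary' (in the spirit of bch_cut_back()). */
static void model_cut_back(uint64_t boundary, struct model_extent *e)
{
        if (e->end > boundary)
                e->end = boundary;
}

/* Keep only the part from 'boundary' on (in the spirit of bch_cut_front()). */
static void model_cut_front(uint64_t boundary, struct model_extent *e)
{
        if (e->start < boundary)
                e->start = boundary;
}

int main(void)
{
        struct model_extent k = { .start = 90, .end = 110 };
        struct model_extent head = k;
        uint64_t node_end = 100;        /* this node covers keys up to 100 */

        model_cut_back(node_end, &head);  /* [90,100) is inserted here        */
        model_cut_front(node_end, &k);    /* [100,110) moves to the next node */

        printf("[%llu,%llu) here, [%llu,%llu) next\n",
               (unsigned long long)head.start, (unsigned long long)head.end,
               (unsigned long long)k.start, (unsigned long long)k.end);
        return 0;
}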
2028 static int btree_split(struct btree *b, struct btree_op *op, in btree_split() argument
2041 if (btree_check_reserve(b, op)) { in btree_split()
2042 if (!b->level) in btree_split()
2048 n1 = btree_node_alloc_replacement(b, op); in btree_split()
2053 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; in btree_split()
2058 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2060 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
2064 if (!b->parent) { in btree_split()
2065 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2095 bkey_copy_key(&n2->key, &b->key); in btree_split()
2102 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2123 } else if (!b->parent) { in btree_split()
2130 make_btree_freeing_key(b, parent_keys.top); in btree_split()
2133 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); in btree_split()
2137 btree_node_free(b); in btree_split()
2140 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2144 bkey_put(b->c, &n2->key); in btree_split()
2148 bkey_put(b->c, &n1->key); in btree_split()
2152 WARN(1, "bcache: btree split failed (level %u)", b->level); in btree_split()
2162 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, in bch_btree_insert_node() argument
2169 BUG_ON(b->level && replace_key); in bch_btree_insert_node()
2173 mutex_lock(&b->write_lock); in bch_btree_insert_node()
2175 if (write_block(b) != btree_bset_last(b) && in bch_btree_insert_node()
2176 b->keys.last_set_unwritten) in bch_btree_insert_node()
2177 bch_btree_init_next(b); /* just wrote a set */ in bch_btree_insert_node()
2179 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { in bch_btree_insert_node()
2180 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2184 BUG_ON(write_block(b) != btree_bset_last(b)); in bch_btree_insert_node()
2186 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { in bch_btree_insert_node()
2187 if (!b->level) in bch_btree_insert_node()
2188 bch_btree_leaf_dirty(b, journal_ref); in bch_btree_insert_node()
2190 bch_btree_node_write(b, &cl); in bch_btree_insert_node()
2193 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2201 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2203 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2204 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2208 int ret = btree_split(b, op, insert_keys, replace_key); in bch_btree_insert_node()
2218 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, in bch_btree_insert_check_key() argument
2222 uint64_t btree_ptr = b->key.ptr[0]; in bch_btree_insert_check_key()
2223 unsigned long seq = b->seq; in bch_btree_insert_check_key()
2230 rw_unlock(false, b); in bch_btree_insert_check_key()
2231 rw_lock(true, b, b->level); in bch_btree_insert_check_key()
2233 if (b->key.ptr[0] != btree_ptr || in bch_btree_insert_check_key()
2234 b->seq != seq + 1) { in bch_btree_insert_check_key()
2235 op->lock = b->level; in bch_btree_insert_check_key()
2247 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); in bch_btree_insert_check_key()
2252 downgrade_write(&b->lock); in bch_btree_insert_check_key()
2263 static int btree_insert_fn(struct btree_op *b_op, struct btree *b) in btree_insert_fn() argument
2268 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2310 void bch_btree_set_root(struct btree *b) in bch_btree_set_root() argument
2317 trace_bcache_btree_set_root(b); in bch_btree_set_root()
2319 BUG_ON(!b->written); in bch_btree_set_root()
2321 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_btree_set_root()
2322 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2324 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2325 list_del_init(&b->list); in bch_btree_set_root()
2326 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2328 b->c->root = b; in bch_btree_set_root()
2330 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2336 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_nodes_recurse() argument
2342 if (b->level) { in bch_btree_map_nodes_recurse()
2346 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2348 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2350 ret = btree(map_nodes_recurse, k, b, in bch_btree_map_nodes_recurse()
2359 if (!b->level || flags == MAP_ALL_NODES) in bch_btree_map_nodes_recurse()
2360 ret = fn(op, b); in bch_btree_map_nodes_recurse()
2371 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_keys_recurse() argument
2379 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2381 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2382 ret = !b->level in bch_btree_map_keys_recurse()
2383 ? fn(op, b, k) in bch_btree_map_keys_recurse()
2384 : btree(map_keys_recurse, k, b, op, from, fn, flags); in bch_btree_map_keys_recurse()
2391 if (!b->level && (flags & MAP_END_KEY)) in bch_btree_map_keys_recurse()
2392 ret = fn(op, b, &KEY(KEY_INODE(&b->key), in bch_btree_map_keys_recurse()
2393 KEY_OFFSET(&b->key), 0)); in bch_btree_map_keys_recurse()
2430 static int refill_keybuf_fn(struct btree_op *op, struct btree *b, in refill_keybuf_fn() argument