Lines matching refs: b

(Cross-reference results for the identifier "b" in the Linux bcache btree code, drivers/md/bcache/btree.c. Each entry gives the source line number, the matching source line, and the enclosing function; sites that define "b" are tagged "argument" or "local".)

102 #define insert_lock(s, b)	((b)->level <= (s)->lock)  argument
122 #define btree(fn, key, b, op, ...) \ argument
124 int _r, l = (b)->level - 1; \
126 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
127 _w, b); \
163 static inline struct bset *write_block(struct btree *b) in write_block() argument
165 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); in write_block()
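write_block() is plain pointer arithmetic: the first unwritten block sits at the start of the first bset plus written * block_bytes. A minimal userspace sketch of the same arithmetic (the struct here is a stand-in, not bcache's layout):

#include <stdio.h>
#include <stddef.h>

struct node {
	void     *buf;          /* start of the node's buffer (first bset) */
	unsigned  written;      /* blocks already written out */
	unsigned  block_bytes;  /* cache-device block size in bytes */
};

/* Address of the next unwritten block, mirroring write_block(). */
static void *write_block(struct node *n)
{
	return (char *)n->buf + (size_t)n->written * n->block_bytes;
}

int main(void)
{
	char buf[4096 * 8];
	struct node n = { buf, 3, 4096 };

	/* Three 4 KiB blocks written: next write lands at offset 12288. */
	printf("offset = %td\n", (char *)write_block(&n) - buf);
	return 0;
}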
168 static void bch_btree_init_next(struct btree *b) in bch_btree_init_next() argument
171 if (b->level && b->keys.nsets) in bch_btree_init_next()
172 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
174 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
176 if (b->written < btree_blocks(b)) in bch_btree_init_next()
177 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
178 bset_magic(&b->c->sb)); in bch_btree_init_next()
195 static uint64_t btree_csum_set(struct btree *b, struct bset *i) in btree_csum_set() argument
197 uint64_t crc = b->key.ptr[0]; in btree_csum_set()
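btree_csum_set() seeds the checksum with the node's first pointer (b->key.ptr[0]), so a bset whose bytes survive but land in the wrong bucket should still fail verification. A hedged sketch of location-seeded checksumming; FNV-1a stands in for bcache's real 64-bit CRC, which is not reproduced here:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in hash, purely for illustration. */
static uint64_t hash64_update(uint64_t h, const void *data, size_t len)
{
	const unsigned char *p = data;

	while (len--) {
		h ^= *p++;
		h *= 0x100000001b3ULL;
	}
	return h;
}

/* Seed with the bucket pointer so location is part of the checksum. */
static uint64_t csum_set(uint64_t ptr0, const void *set, size_t bytes)
{
	return hash64_update(ptr0, set, bytes);
}

int main(void)
{
	char set[] = "example bset payload";

	/* Same bytes, different pointer => different checksum. */
	printf("%llx\n", (unsigned long long)csum_set(0x1000, set, sizeof(set)));
	printf("%llx\n", (unsigned long long)csum_set(0x2000, set, sizeof(set)));
	return 0;
}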
204 void bch_btree_node_read_done(struct btree *b) in bch_btree_node_read_done() argument
207 struct bset *i = btree_bset_first(b); in bch_btree_node_read_done()
215 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
216 iter->size = b->c->sb.bucket_size / b->c->sb.block_size; in bch_btree_node_read_done()
220 iter->b = &b->keys; in bch_btree_node_read_done()
227 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
228 i = write_block(b)) { in bch_btree_node_read_done()
234 if (b->written + set_blocks(i, block_bytes(b->c)) > in bch_btree_node_read_done()
235 btree_blocks(b)) in bch_btree_node_read_done()
239 if (i->magic != bset_magic(&b->c->sb)) in bch_btree_node_read_done()
249 if (i->csum != btree_csum_set(b, i)) in bch_btree_node_read_done()
255 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
260 b->written += set_blocks(i, block_bytes(b->c)); in bch_btree_node_read_done()
264 for (i = write_block(b); in bch_btree_node_read_done()
265 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
266 i = ((void *) i) + block_bytes(b->c)) in bch_btree_node_read_done()
267 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
270 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
272 i = b->keys.set[0].data; in bch_btree_node_read_done()
274 if (b->keys.set[0].size && in bch_btree_node_read_done()
275 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
278 if (b->written < btree_blocks(b)) in bch_btree_node_read_done()
279 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
280 bset_magic(&b->c->sb)); in bch_btree_node_read_done()
282 mempool_free(iter, &b->c->fill_iter); in bch_btree_node_read_done()
285 set_btree_node_io_error(b); in bch_btree_node_read_done()
286 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
287 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
288 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
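The read path above validates bsets one after another in bucket order and stops at the first failure: a set that would overrun the bucket, a bad magic, a bad checksum, or an empty non-first set ends the scan, and only sets sharing the first set's seq count at all. A toy, runnable version of that accept-until-broken loop (the struct and checksum rule are invented for the demo):

#include <stdio.h>
#include <stdint.h>

/* Toy bset header; bcache's real layout differs. */
struct set {
	uint64_t seq, magic, csum;
	unsigned keys, blocks;
};

#define MAGIC       0xabcdULL
#define NODE_BLOCKS 8u

/* Demo rule: a set is "intact" when csum equals seq. */
static int csum_ok(const struct set *s) { return s->csum == s->seq; }

int main(void)
{
	struct set sets[] = {
		{ 7, MAGIC, 7, 10, 2 },
		{ 7, MAGIC, 7,  4, 1 },
		{ 7, MAGIC, 9,  3, 1 },   /* bad csum: scan stops here */
		{ 7, MAGIC, 7,  2, 1 },
	};
	unsigned written = 0, n = sizeof(sets) / sizeof(sets[0]);

	for (unsigned i = 0; i < n; i++) {
		const struct set *s = &sets[i];

		if (s->seq != sets[0].seq)             break; /* stale block */
		if (written + s->blocks > NODE_BLOCKS) break; /* overrun     */
		if (s->magic != MAGIC)                 break; /* not a bset  */
		if (!csum_ok(s))                       break; /* corrupt     */
		if (i > 0 && !s->keys)                 break; /* empty tail  */
		written += s->blocks;                         /* accept      */
	}
	printf("accepted %u blocks\n", written);
	return 0;
}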
299 static void bch_btree_node_read(struct btree *b) in bch_btree_node_read() argument
305 trace_bcache_btree_read(b); in bch_btree_node_read()
309 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
310 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
315 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
317 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
321 set_btree_node_io_error(b); in bch_btree_node_read()
323 bch_bbio_free(bio, b->c); in bch_btree_node_read()
325 if (btree_node_io_error(b)) in bch_btree_node_read()
328 bch_btree_node_read_done(b); in bch_btree_node_read()
329 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
333 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
334 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
337 static void btree_complete_write(struct btree *b, struct btree_write *w) in btree_complete_write() argument
340 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
341 wake_up_allocators(b->c); in btree_complete_write()
345 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
354 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_unlock() local
356 up(&b->io_mutex); in btree_node_write_unlock()
361 struct btree *b = container_of(cl, struct btree, io); in __btree_node_write_done() local
362 struct btree_write *w = btree_prev_write(b); in __btree_node_write_done()
364 bch_bbio_free(b->bio, b->c); in __btree_node_write_done()
365 b->bio = NULL; in __btree_node_write_done()
366 btree_complete_write(b, w); in __btree_node_write_done()
368 if (btree_node_dirty(b)) in __btree_node_write_done()
369 schedule_delayed_work(&b->work, 30 * HZ); in __btree_node_write_done()
376 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_done() local
378 bio_free_pages(b->bio); in btree_node_write_done()
385 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_endio() local
388 set_btree_node_io_error(b); in btree_node_write_endio()
390 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); in btree_node_write_endio()
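All of the completion handlers above recover the btree node from the embedded closure with container_of(cl, struct btree, io). A runnable userspace illustration of that idiom (struct names invented):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct closure { int remaining; };

struct btree_like {
	int level;
	struct closure io;   /* embedded completion state */
};

/* The callback receives only the closure; recover the outer node. */
static void write_done(struct closure *cl)
{
	struct btree_like *b = container_of(cl, struct btree_like, io);

	printf("node at level %d finished writing\n", b->level);
}

int main(void)
{
	struct btree_like b = { .level = 1 };

	write_done(&b.io);
	return 0;
}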
394 static void do_btree_node_write(struct btree *b) in do_btree_node_write() argument
396 struct closure *cl = &b->io; in do_btree_node_write()
397 struct bset *i = btree_bset_last(b); in do_btree_node_write()
401 i->csum = btree_csum_set(b, i); in do_btree_node_write()
403 BUG_ON(b->bio); in do_btree_node_write()
404 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
406 b->bio->bi_end_io = btree_node_write_endio; in do_btree_node_write()
407 b->bio->bi_private = cl; in do_btree_node_write()
408 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); in do_btree_node_write()
409 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; in do_btree_node_write()
410 bch_bio_map(b->bio, i); in do_btree_node_write()
427 bkey_copy(&k.key, &b->key); in do_btree_node_write()
429 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
431 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { in do_btree_node_write()
436 bio_for_each_segment_all(bv, b->bio, iter_all) { in do_btree_node_write()
441 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
449 b->bio->bi_vcnt = 0; in do_btree_node_write()
450 bch_bio_map(b->bio, i); in do_btree_node_write()
452 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
459 void __bch_btree_node_write(struct btree *b, struct closure *parent) in __bch_btree_node_write() argument
461 struct bset *i = btree_bset_last(b); in __bch_btree_node_write()
463 lockdep_assert_held(&b->write_lock); in __bch_btree_node_write()
465 trace_bcache_btree_write(b); in __bch_btree_node_write()
468 BUG_ON(b->written >= btree_blocks(b)); in __bch_btree_node_write()
469 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
470 BUG_ON(btree_bset_first(b)->seq != i->seq); in __bch_btree_node_write()
471 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
473 cancel_delayed_work(&b->work); in __bch_btree_node_write()
476 down(&b->io_mutex); in __bch_btree_node_write()
477 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
479 clear_bit(BTREE_NODE_dirty, &b->flags); in __bch_btree_node_write()
480 change_bit(BTREE_NODE_write_idx, &b->flags); in __bch_btree_node_write()
482 do_btree_node_write(b); in __bch_btree_node_write()
484 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, in __bch_btree_node_write()
485 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); in __bch_btree_node_write()
487 b->written += set_blocks(i, block_bytes(b->c)); in __bch_btree_node_write()
490 void bch_btree_node_write(struct btree *b, struct closure *parent) in bch_btree_node_write() argument
492 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
494 lockdep_assert_held(&b->lock); in bch_btree_node_write()
496 __bch_btree_node_write(b, parent); in bch_btree_node_write()
502 if (nsets && !b->keys.nsets) in bch_btree_node_write()
503 bch_btree_verify(b); in bch_btree_node_write()
505 bch_btree_init_next(b); in bch_btree_node_write()
508 static void bch_btree_node_write_sync(struct btree *b) in bch_btree_node_write_sync() argument
514 mutex_lock(&b->write_lock); in bch_btree_node_write_sync()
515 bch_btree_node_write(b, &cl); in bch_btree_node_write_sync()
516 mutex_unlock(&b->write_lock); in bch_btree_node_write_sync()
523 struct btree *b = container_of(to_delayed_work(w), struct btree, work); in btree_node_write_work() local
525 mutex_lock(&b->write_lock); in btree_node_write_work()
526 if (btree_node_dirty(b)) in btree_node_write_work()
527 __bch_btree_node_write(b, NULL); in btree_node_write_work()
528 mutex_unlock(&b->write_lock); in btree_node_write_work()
531 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) in bch_btree_leaf_dirty() argument
533 struct bset *i = btree_bset_last(b); in bch_btree_leaf_dirty()
534 struct btree_write *w = btree_current_write(b); in bch_btree_leaf_dirty()
536 lockdep_assert_held(&b->write_lock); in bch_btree_leaf_dirty()
538 BUG_ON(!b->written); in bch_btree_leaf_dirty()
541 if (!btree_node_dirty(b)) in bch_btree_leaf_dirty()
542 schedule_delayed_work(&b->work, 30 * HZ); in bch_btree_leaf_dirty()
544 set_btree_node_dirty(b); in bch_btree_leaf_dirty()
548 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
562 bch_btree_node_write(b, NULL); in bch_btree_leaf_dirty()
575 static void mca_data_free(struct btree *b) in mca_data_free() argument
577 BUG_ON(b->io_mutex.count != 1); in mca_data_free()
579 bch_btree_keys_free(&b->keys); in mca_data_free()
581 b->c->btree_cache_used--; in mca_data_free()
582 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
585 static void mca_bucket_free(struct btree *b) in mca_bucket_free() argument
587 BUG_ON(btree_node_dirty(b)); in mca_bucket_free()
589 b->key.ptr[0] = 0; in mca_bucket_free()
590 hlist_del_init_rcu(&b->hash); in mca_bucket_free()
591 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
599 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) in mca_data_alloc() argument
601 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
603 ilog2(b->c->btree_pages), in mca_data_alloc()
606 b->c->btree_cache_used++; in mca_data_alloc()
607 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
609 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
620 struct btree *b = kzalloc(sizeof(struct btree), gfp); in mca_bucket_alloc() local
622 if (!b) in mca_bucket_alloc()
625 init_rwsem(&b->lock); in mca_bucket_alloc()
626 lockdep_set_novalidate_class(&b->lock); in mca_bucket_alloc()
627 mutex_init(&b->write_lock); in mca_bucket_alloc()
628 lockdep_set_novalidate_class(&b->write_lock); in mca_bucket_alloc()
629 INIT_LIST_HEAD(&b->list); in mca_bucket_alloc()
630 INIT_DELAYED_WORK(&b->work, btree_node_write_work); in mca_bucket_alloc()
631 b->c = c; in mca_bucket_alloc()
632 sema_init(&b->io_mutex, 1); in mca_bucket_alloc()
634 mca_data_alloc(b, k, gfp); in mca_bucket_alloc()
635 return b; in mca_bucket_alloc()
638 static int mca_reap(struct btree *b, unsigned int min_order, bool flush) in mca_reap() argument
643 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
645 if (!down_write_trylock(&b->lock)) in mca_reap()
648 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
650 if (b->keys.page_order < min_order) in mca_reap()
654 if (btree_node_dirty(b)) in mca_reap()
657 if (down_trylock(&b->io_mutex)) in mca_reap()
659 up(&b->io_mutex); in mca_reap()
668 mutex_lock(&b->write_lock); in mca_reap()
674 if (btree_node_journal_flush(b)) { in mca_reap()
675 pr_debug("bnode %p is flushing by journal, retry", b); in mca_reap()
676 mutex_unlock(&b->write_lock); in mca_reap()
681 if (btree_node_dirty(b)) in mca_reap()
682 __bch_btree_node_write(b, &cl); in mca_reap()
683 mutex_unlock(&b->write_lock); in mca_reap()
688 down(&b->io_mutex); in mca_reap()
689 up(&b->io_mutex); in mca_reap()
693 rw_unlock(true, b); in mca_reap()
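mca_reap() never blocks: it trylocks the node's rwsem and io_mutex and backs off if either is contended, or if the node is dirty and flushing was not requested, since it runs under the memory shrinker. A small pthreads sketch of that back-off shape (fields and the flush stand-in are hypothetical; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

struct node {
	pthread_rwlock_t lock;
	int dirty;
	int io_busy;
};

/* Try to reclaim without ever blocking; nonzero means "skip me". */
static int reap(struct node *n, int flush)
{
	if (pthread_rwlock_trywrlock(&n->lock))
		return -ENOMEM;                 /* lock contended: skip */

	if ((n->dirty && !flush) || n->io_busy) {
		pthread_rwlock_unlock(&n->lock);
		return -ENOMEM;                 /* busy: skip */
	}
	if (n->dirty)
		n->dirty = 0;                   /* stand-in for a flush */
	return 0;                               /* reclaimed, lock held */
}

int main(void)
{
	struct node n = { PTHREAD_RWLOCK_INITIALIZER, 1, 0 };

	printf("no flush: %d\n", reap(&n, 0));  /* dirty => skipped */
	printf("flush:    %d\n", reap(&n, 1));  /* flushed => reclaimed */
	pthread_rwlock_unlock(&n.lock);
	return 0;
}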
701 struct btree *b, *t; in bch_mca_scan() local
730 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
735 !mca_reap(b, 0, false)) { in bch_mca_scan()
736 mca_data_free(b); in bch_mca_scan()
737 rw_unlock(true, b); in bch_mca_scan()
747 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_mca_scan()
750 if (!b->accessed && in bch_mca_scan()
751 !mca_reap(b, 0, false)) { in bch_mca_scan()
752 mca_bucket_free(b); in bch_mca_scan()
753 mca_data_free(b); in bch_mca_scan()
754 rw_unlock(true, b); in bch_mca_scan()
757 b->accessed = 0; in bch_mca_scan()
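bch_mca_scan() gives cached nodes a second chance: a node whose accessed bit is set survives the pass and has the bit cleared, so it is only reclaimed if nothing touches it before the next pass. A classic clock-style sketch of that policy (everything here is a toy):

#include <stdio.h>

struct cnode { int accessed; int alive; };

/* One shrinker pass, mirroring the accessed-bit handling above. */
static void scan(struct cnode *nodes, int n)
{
	for (int i = 0; i < n; i++) {
		if (!nodes[i].alive)
			continue;
		if (!nodes[i].accessed)
			nodes[i].alive = 0;     /* cold: reclaim */
		else
			nodes[i].accessed = 0;  /* warm: demote for next pass */
	}
}

int main(void)
{
	struct cnode nodes[] = { { 1, 1 }, { 0, 1 } };

	scan(nodes, 2);
	printf("node0 alive=%d, node1 alive=%d\n",
	       nodes[0].alive, nodes[1].alive);   /* 1, 0 */
	return 0;
}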
780 struct btree *b; in bch_btree_cache_free() local
801 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
808 if (btree_node_dirty(b)) { in bch_btree_cache_free()
809 btree_complete_write(b, btree_current_write(b)); in bch_btree_cache_free()
810 clear_bit(BTREE_NODE_dirty, &b->flags); in bch_btree_cache_free()
812 mca_data_free(b); in bch_btree_cache_free()
816 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
818 list_del(&b->list); in bch_btree_cache_free()
819 cancel_delayed_work_sync(&b->work); in bch_btree_cache_free()
820 kfree(b); in bch_btree_cache_free()
873 struct btree *b; in mca_find() local
876 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
877 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
879 b = NULL; in mca_find()
882 return b; in mca_find()
903 struct btree *b; in mca_cannibalize() local
910 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
911 if (!mca_reap(b, btree_order(k), false)) in mca_cannibalize()
912 return b; in mca_cannibalize()
914 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
915 if (!mca_reap(b, btree_order(k), true)) in mca_cannibalize()
916 return b; in mca_cannibalize()
939 struct btree *b; in mca_alloc() local
951 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
952 if (!mca_reap(b, btree_order(k), false)) in mca_alloc()
958 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
959 if (!mca_reap(b, 0, false)) { in mca_alloc()
960 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
961 if (!b->keys.set[0].data) in mca_alloc()
967 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
968 if (!b) in mca_alloc()
971 BUG_ON(!down_write_trylock(&b->lock)); in mca_alloc()
972 if (!b->keys.set->data) in mca_alloc()
975 BUG_ON(b->io_mutex.count != 1); in mca_alloc()
977 bkey_copy(&b->key, k); in mca_alloc()
978 list_move(&b->list, &c->btree_cache); in mca_alloc()
979 hlist_del_init_rcu(&b->hash); in mca_alloc()
980 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
982 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); in mca_alloc()
983 b->parent = (void *) ~0UL; in mca_alloc()
984 b->flags = 0; in mca_alloc()
985 b->written = 0; in mca_alloc()
986 b->level = level; in mca_alloc()
988 if (!b->level) in mca_alloc()
989 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
990 &b->c->expensive_debug_checks); in mca_alloc()
992 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
993 &b->c->expensive_debug_checks); in mca_alloc()
995 return b; in mca_alloc()
997 if (b) in mca_alloc()
998 rw_unlock(true, b); in mca_alloc()
1000 b = mca_cannibalize(c, op, k); in mca_alloc()
1001 if (!IS_ERR(b)) in mca_alloc()
1004 return b; in mca_alloc()
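mca_alloc() works through progressively more expensive sources: a node from the freeable list, then one from the freed list (re-attaching key data), then a fresh allocation, and as a last resort cannibalizing a node someone else is caching. A condensed sketch of that cheapest-first chain (all four sources are stubs):

#include <stdio.h>
#include <stddef.h>

struct node { int id; };

/* Stubs standing in for the four allocation sources. */
static struct node *from_freeable(void) { return NULL; }
static struct node *from_freed(void)    { return NULL; }
static struct node *fresh_alloc(void)   { static struct node n = { 3 }; return &n; }
static struct node *cannibalize(void)   { static struct node n = { 4 }; return &n; }

/* Mirror the cheapest-first fallback order. */
static struct node *alloc_node(void)
{
	struct node *b;

	if ((b = from_freeable())) return b;  /* reuse, data intact  */
	if ((b = from_freed()))    return b;  /* reuse, realloc data */
	if ((b = fresh_alloc()))   return b;  /* brand new           */
	return cannibalize();                 /* evict somebody else */
}

int main(void)
{
	printf("got node %d\n", alloc_node()->id);
	return 0;
}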
1021 struct btree *b; in bch_btree_node_get() local
1025 b = mca_find(c, k); in bch_btree_node_get()
1027 if (!b) { in bch_btree_node_get()
1032 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
1035 if (!b) in bch_btree_node_get()
1037 if (IS_ERR(b)) in bch_btree_node_get()
1038 return b; in bch_btree_node_get()
1040 bch_btree_node_read(b); in bch_btree_node_get()
1043 downgrade_write(&b->lock); in bch_btree_node_get()
1045 rw_lock(write, b, level); in bch_btree_node_get()
1046 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1047 rw_unlock(write, b); in bch_btree_node_get()
1050 BUG_ON(b->level != level); in bch_btree_node_get()
1053 if (btree_node_io_error(b)) { in bch_btree_node_get()
1054 rw_unlock(write, b); in bch_btree_node_get()
1058 BUG_ON(!b->written); in bch_btree_node_get()
1060 b->parent = parent; in bch_btree_node_get()
1061 b->accessed = 1; in bch_btree_node_get()
1063 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1064 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1065 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1068 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1069 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1071 return b; in bch_btree_node_get()
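bch_btree_node_get() is an optimistic lookup: it finds the node without holding its lock, takes the lock, then re-checks that PTR_HASH still matches the key; if the node was recycled for a different bucket in the window, it unlocks and retries. A stripped-down sketch of that recheck (a single field stands in for the hash comparison):

#include <stdio.h>
#include <stdint.h>

struct cached_node {
	uint64_t key;   /* which bucket this node currently caches */
};

/* Returns 1 if the node still caches 'want' after "locking". */
static int lock_and_recheck(struct cached_node *n, uint64_t want)
{
	/* ...take the node's lock here... */
	if (n->key != want)
		return 0;   /* recycled meanwhile: unlock and retry */
	return 1;
}

int main(void)
{
	struct cached_node n = { 0xabc };

	printf("%d\n", lock_and_recheck(&n, 0xabc));  /* still ours */
	n.key = 0xdef;                                /* recycled */
	printf("%d\n", lock_and_recheck(&n, 0xabc));  /* would retry */
	return 0;
}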
1076 struct btree *b; in btree_node_prefetch() local
1079 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1082 if (!IS_ERR_OR_NULL(b)) { in btree_node_prefetch()
1083 b->parent = parent; in btree_node_prefetch()
1084 bch_btree_node_read(b); in btree_node_prefetch()
1085 rw_unlock(true, b); in btree_node_prefetch()
1091 static void btree_node_free(struct btree *b) in btree_node_free() argument
1093 trace_bcache_btree_node_free(b); in btree_node_free()
1095 BUG_ON(b == b->c->root); in btree_node_free()
1098 mutex_lock(&b->write_lock); in btree_node_free()
1105 if (btree_node_journal_flush(b)) { in btree_node_free()
1106 mutex_unlock(&b->write_lock); in btree_node_free()
1107 pr_debug("bnode %p journal_flush set, retry", b); in btree_node_free()
1112 if (btree_node_dirty(b)) { in btree_node_free()
1113 btree_complete_write(b, btree_current_write(b)); in btree_node_free()
1114 clear_bit(BTREE_NODE_dirty, &b->flags); in btree_node_free()
1117 mutex_unlock(&b->write_lock); in btree_node_free()
1119 cancel_delayed_work(&b->work); in btree_node_free()
1121 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1122 bch_bucket_free(b->c, &b->key); in btree_node_free()
1123 mca_bucket_free(b); in btree_node_free()
1124 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1132 struct btree *b = ERR_PTR(-EAGAIN); in __bch_btree_node_alloc() local
1142 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1143 if (IS_ERR(b)) in __bch_btree_node_alloc()
1146 if (!b) { in __bch_btree_node_alloc()
1152 b->accessed = 1; in __bch_btree_node_alloc()
1153 b->parent = parent; in __bch_btree_node_alloc()
1154 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); in __bch_btree_node_alloc()
1158 trace_bcache_btree_node_alloc(b); in __bch_btree_node_alloc()
1159 return b; in __bch_btree_node_alloc()
1166 return b; in __bch_btree_node_alloc()
1176 static struct btree *btree_node_alloc_replacement(struct btree *b, in btree_node_alloc_replacement() argument
1179 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1183 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1184 bkey_copy_key(&n->key, &b->key); in btree_node_alloc_replacement()
1191 static void make_btree_freeing_key(struct btree *b, struct bkey *k) in make_btree_freeing_key() argument
1195 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1197 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1199 bkey_copy(k, &b->key); in make_btree_freeing_key()
1204 bch_inc_gen(PTR_CACHE(b->c, &b->key, i), in make_btree_freeing_key()
1205 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1207 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1210 static int btree_check_reserve(struct btree *b, struct btree_op *op) in btree_check_reserve() argument
1212 struct cache_set *c = b->c; in btree_check_reserve()
1214 unsigned int i, reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1229 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
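The reserve formula above, (c->root->level - b->level) * 2 + 1, appears to budget for the worst case where a split cascades all the way up: up to two new nodes per level above b, plus one for a possible new root. A one-liner checking that arithmetic:

#include <stdio.h>

/* Nodes needed if a split propagates from 'level' to a new root. */
static unsigned reserve_for(unsigned root_level, unsigned level)
{
	return (root_level - level) * 2 + 1;
}

int main(void)
{
	/* Splitting a leaf under a level-2 root can need up to 5 nodes. */
	printf("%u\n", reserve_for(2, 0));
	return 0;
}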
1286 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) argument
1295 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key() local
1297 b->gen = PTR_GEN(k, i); in bch_initial_mark_key()
1300 b->prio = BTREE_PRIO; in bch_initial_mark_key()
1301 else if (!level && b->prio == BTREE_PRIO) in bch_initial_mark_key()
1302 b->prio = INITIAL_PRIO; in bch_initial_mark_key()
1313 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) in btree_gc_mark_node() argument
1323 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1324 stale = max(stale, btree_mark_key(b, k)); in btree_gc_mark_node()
1327 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1337 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1339 bset_written(&b->keys, t) && in btree_gc_mark_node()
1340 bkey_cmp(&b->key, &t->end) < 0, in btree_gc_mark_node()
1341 b, "found short btree key in gc"); in btree_gc_mark_node()
1343 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1358 struct btree *b; member
1362 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1367 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, in btree_gc_coalesce() argument
1378 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1384 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) in btree_gc_coalesce()
1387 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1390 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1391 block_bytes(b->c)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1395 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); in btree_gc_coalesce()
1406 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1425 block_bytes(b->c)) > blocks) in btree_gc_coalesce()
1441 block_bytes(b->c)) > in btree_gc_coalesce()
1447 last = &r->b->key; in btree_gc_coalesce()
1450 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > in btree_gc_coalesce()
1490 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) in btree_gc_coalesce()
1493 make_btree_freeing_key(r[i].b, keylist.top); in btree_gc_coalesce()
1497 bch_btree_insert_node(b, op, &keylist, NULL, NULL); in btree_gc_coalesce()
1501 btree_node_free(r[i].b); in btree_gc_coalesce()
1502 rw_unlock(true, r[i].b); in btree_gc_coalesce()
1504 r[i].b = new_nodes[i]; in btree_gc_coalesce()
1508 r[nodes - 1].b = ERR_PTR(-EINTR); in btree_gc_coalesce()
1523 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
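The coalesce gate in the lines above (blocks = btree_default_blocks * 2 / 3, compared against blocks * (nodes - 1)) merges a run of nodes into one fewer node only if the combined keys fit at no more than two-thirds fullness, leaving slack for later inserts. A sketch of that check:

#include <stdio.h>

/* Coalesce n nodes into n-1 only if keys fit at <= 2/3 fullness. */
static int can_coalesce(unsigned total_key_blocks,
			unsigned node_blocks, unsigned nodes)
{
	unsigned target = node_blocks * 2 / 3;

	return total_key_blocks <= target * (nodes - 1);
}

int main(void)
{
	/* Four 8-block nodes: keys must fit in 3 * 5 = 15 blocks. */
	printf("%d\n", can_coalesce(14, 8, 4));  /* 1: merge into 3 */
	printf("%d\n", can_coalesce(16, 8, 4));  /* 0: leave alone  */
	return 0;
}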
1534 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, in btree_gc_rewrite_node() argument
1540 if (btree_check_reserve(b, NULL)) in btree_gc_rewrite_node()
1546 if (btree_check_reserve(b, NULL)) { in btree_gc_rewrite_node()
1560 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1570 static unsigned int btree_gc_count_keys(struct btree *b) in btree_gc_count_keys() argument
1576 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1608 static int btree_gc_recurse(struct btree *b, struct btree_op *op, in btree_gc_recurse() argument
1618 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1621 i->b = ERR_PTR(-EINTR); in btree_gc_recurse()
1624 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1626 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1627 true, b); in btree_gc_recurse()
1628 if (IS_ERR(r->b)) { in btree_gc_recurse()
1629 ret = PTR_ERR(r->b); in btree_gc_recurse()
1633 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1635 ret = btree_gc_coalesce(b, op, gc, r); in btree_gc_recurse()
1640 if (!last->b) in btree_gc_recurse()
1643 if (!IS_ERR(last->b)) { in btree_gc_recurse()
1644 should_rewrite = btree_gc_mark_node(last->b, gc); in btree_gc_recurse()
1646 ret = btree_gc_rewrite_node(b, op, last->b); in btree_gc_recurse()
1651 if (last->b->level) { in btree_gc_recurse()
1652 ret = btree_gc_recurse(last->b, op, writes, gc); in btree_gc_recurse()
1657 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1663 mutex_lock(&last->b->write_lock); in btree_gc_recurse()
1664 if (btree_node_dirty(last->b)) in btree_gc_recurse()
1665 bch_btree_node_write(last->b, writes); in btree_gc_recurse()
1666 mutex_unlock(&last->b->write_lock); in btree_gc_recurse()
1667 rw_unlock(true, last->b); in btree_gc_recurse()
1671 r->b = NULL; in btree_gc_recurse()
1673 if (atomic_read(&b->c->search_inflight) && in btree_gc_recurse()
1674 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { in btree_gc_recurse()
1687 if (!IS_ERR_OR_NULL(i->b)) { in btree_gc_recurse()
1688 mutex_lock(&i->b->write_lock); in btree_gc_recurse()
1689 if (btree_node_dirty(i->b)) in btree_gc_recurse()
1690 bch_btree_node_write(i->b, writes); in btree_gc_recurse()
1691 mutex_unlock(&i->b->write_lock); in btree_gc_recurse()
1692 rw_unlock(true, i->b); in btree_gc_recurse()
1698 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, in bch_btree_gc_root() argument
1705 should_rewrite = btree_gc_mark_node(b, gc); in bch_btree_gc_root()
1707 n = btree_node_alloc_replacement(b, NULL); in bch_btree_gc_root()
1713 btree_node_free(b); in bch_btree_gc_root()
1720 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1722 if (b->level) { in bch_btree_gc_root()
1723 ret = btree_gc_recurse(b, op, writes, gc); in bch_btree_gc_root()
1728 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1736 struct bucket *b; in btree_gc_start() local
1748 for_each_bucket(b, ca) { in btree_gc_start()
1749 b->last_gc = b->gen; in btree_gc_start()
1750 if (!atomic_read(&b->pin)) { in btree_gc_start()
1751 SET_GC_MARK(b, 0); in btree_gc_start()
1752 SET_GC_SECTORS_USED(b, 0); in btree_gc_start()
1761 struct bucket *b; in bch_btree_gc_finish() local
1810 for_each_bucket(b, ca) { in bch_btree_gc_finish()
1811 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1813 if (atomic_read(&b->pin)) in bch_btree_gc_finish()
1816 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); in bch_btree_gc_finish()
1818 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) in bch_btree_gc_finish()
1915 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) in bch_btree_check_recurse() argument
1921 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1922 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1924 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1926 if (b->level) { in bch_btree_check_recurse()
1927 bch_btree_iter_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1930 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1933 btree_node_prefetch(b, k); in bch_btree_check_recurse()
1938 b->c->gc_stats.nodes++; in bch_btree_check_recurse()
1942 ret = btree(check_recurse, p, b, op); in bch_btree_check_recurse()
1963 struct bucket *b; in bch_initial_gc_finish() local
1980 for_each_bucket(b, ca) { in bch_initial_gc_finish()
1985 if (bch_can_invalidate_bucket(ca, b) && in bch_initial_gc_finish()
1986 !GC_MARK(b)) { in bch_initial_gc_finish()
1987 __bch_invalidate_one_bucket(ca, b); in bch_initial_gc_finish()
1989 b - ca->buckets)) in bch_initial_gc_finish()
1991 b - ca->buckets); in bch_initial_gc_finish()
2001 static bool btree_insert_key(struct btree *b, struct bkey *k, in btree_insert_key() argument
2006 BUG_ON(bkey_cmp(k, &b->key) > 0); in btree_insert_key()
2008 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2010 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
2013 trace_bcache_btree_insert_key(b, k, replace_key != NULL, in btree_insert_key()
2020 static size_t insert_u64s_remaining(struct btree *b) in insert_u64s_remaining() argument
2022 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
2027 if (b->keys.ops->is_extents) in insert_u64s_remaining()
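insert_u64s_remaining() holds headroom back in extent nodes: an insert can land in the middle of an existing extent and split it into two keys, so room for one extra maximum-size key is reserved (the exact amount is elided in the listing; KEY_MAX_U64S below is a stand-in value). Sketch:

#include <stdio.h>

#define KEY_MAX_U64S 8   /* illustrative, not bcache's actual value */

static long remaining(long free_u64s, int is_extents)
{
	long ret = free_u64s;

	/* A mid-extent insert may split one key into two. */
	if (is_extents)
		ret -= KEY_MAX_U64S;

	return ret > 0 ? ret : 0;
}

int main(void)
{
	printf("%ld\n", remaining(20, 1));  /* 12 u64s usable */
	printf("%ld\n", remaining(5, 1));   /* 0: no room */
	return 0;
}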
2033 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, in bch_btree_insert_keys() argument
2038 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
2043 if (bkey_u64s(k) > insert_u64s_remaining(b)) in bch_btree_insert_keys()
2046 if (bkey_cmp(k, &b->key) <= 0) { in bch_btree_insert_keys()
2047 if (!b->level) in bch_btree_insert_keys()
2048 bkey_put(b->c, k); in bch_btree_insert_keys()
2050 ret |= btree_insert_key(b, k, replace_key); in bch_btree_insert_keys()
2052 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { in bch_btree_insert_keys()
2056 bch_cut_back(&b->key, &temp.key); in bch_btree_insert_keys()
2057 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2059 ret |= btree_insert_key(b, &temp.key, replace_key); in bch_btree_insert_keys()
2069 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); in bch_btree_insert_keys()
2071 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
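When a key straddles the node's right edge (the START_KEY comparison above), only the part that fits is inserted: a copy is cut back to the node boundary, and the original is cut forward so the remainder flows to the next node. A runnable sketch with plain half-open intervals (bcache extents encode offset and size; [start, end) is used here for clarity):

#include <stdio.h>
#include <stdint.h>

struct extent { uint64_t start, end; };   /* [start, end) */

/* Trim so the extent ends at 'boundary' (bch_cut_back analogue). */
static void cut_back(uint64_t boundary, struct extent *e)
{
	if (e->end > boundary)
		e->end = boundary;
}

/* Trim so the extent starts at 'boundary' (bch_cut_front analogue). */
static void cut_front(uint64_t boundary, struct extent *e)
{
	if (e->start < boundary)
		e->start = boundary;
}

int main(void)
{
	uint64_t node_end = 100;           /* right edge of this node */
	struct extent key  = { 90, 120 };  /* straddles the boundary  */
	struct extent temp = key;

	cut_back(node_end, &temp);         /* [90,100) goes in here   */
	cut_front(node_end, &key);         /* [100,120) for next node */

	printf("insert: [%llu,%llu)\n",
	       (unsigned long long)temp.start, (unsigned long long)temp.end);
	printf("rest:   [%llu,%llu)\n",
	       (unsigned long long)key.start, (unsigned long long)key.end);
	return 0;
}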
2075 static int btree_split(struct btree *b, struct btree_op *op, in btree_split() argument
2088 if (btree_check_reserve(b, op)) { in btree_split()
2089 if (!b->level) in btree_split()
2095 n1 = btree_node_alloc_replacement(b, op); in btree_split()
2100 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; in btree_split()
2105 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2107 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
2111 if (!b->parent) { in btree_split()
2112 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2142 bkey_copy_key(&n2->key, &b->key); in btree_split()
2149 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2170 } else if (!b->parent) { in btree_split()
2177 make_btree_freeing_key(b, parent_keys.top); in btree_split()
2180 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); in btree_split()
2184 btree_node_free(b); in btree_split()
2187 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2191 bkey_put(b->c, &n2->key); in btree_split()
2195 bkey_put(b->c, &n1->key); in btree_split()
2199 WARN(1, "bcache: btree split failed (level %u)", b->level); in btree_split()
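The lines above also show the split decision: after merging into the replacement node, the keys must fill more than 4/5 of the node's blocks for a real split; otherwise the rewrite is just a compaction (the node_compact trace). The threshold in isolation:

#include <stdio.h>

/* Split when the merged keys would fill > 4/5 of the node. */
static int should_split(unsigned set_blocks, unsigned node_blocks)
{
	return set_blocks > node_blocks * 4 / 5;
}

int main(void)
{
	printf("%d\n", should_split(7, 8));  /* 1: split   */
	printf("%d\n", should_split(6, 8));  /* 0: compact */
	return 0;
}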
2209 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, in bch_btree_insert_node() argument
2216 BUG_ON(b->level && replace_key); in bch_btree_insert_node()
2220 mutex_lock(&b->write_lock); in bch_btree_insert_node()
2222 if (write_block(b) != btree_bset_last(b) && in bch_btree_insert_node()
2223 b->keys.last_set_unwritten) in bch_btree_insert_node()
2224 bch_btree_init_next(b); /* just wrote a set */ in bch_btree_insert_node()
2226 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { in bch_btree_insert_node()
2227 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2231 BUG_ON(write_block(b) != btree_bset_last(b)); in bch_btree_insert_node()
2233 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { in bch_btree_insert_node()
2234 if (!b->level) in bch_btree_insert_node()
2235 bch_btree_leaf_dirty(b, journal_ref); in bch_btree_insert_node()
2237 bch_btree_node_write(b, &cl); in bch_btree_insert_node()
2240 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2248 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2250 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2251 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2255 int ret = btree_split(b, op, insert_keys, replace_key); in bch_btree_insert_node()
2265 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, in bch_btree_insert_check_key() argument
2269 uint64_t btree_ptr = b->key.ptr[0]; in bch_btree_insert_check_key()
2270 unsigned long seq = b->seq; in bch_btree_insert_check_key()
2277 rw_unlock(false, b); in bch_btree_insert_check_key()
2278 rw_lock(true, b, b->level); in bch_btree_insert_check_key()
2280 if (b->key.ptr[0] != btree_ptr || in bch_btree_insert_check_key()
2281 b->seq != seq + 1) { in bch_btree_insert_check_key()
2282 op->lock = b->level; in bch_btree_insert_check_key()
2294 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); in bch_btree_insert_check_key()
2299 downgrade_write(&b->lock); in bch_btree_insert_check_key()
2310 static int btree_insert_fn(struct btree_op *b_op, struct btree *b) in btree_insert_fn() argument
2315 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2357 void bch_btree_set_root(struct btree *b) in bch_btree_set_root() argument
2364 trace_bcache_btree_set_root(b); in bch_btree_set_root()
2366 BUG_ON(!b->written); in bch_btree_set_root()
2368 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_btree_set_root()
2369 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2371 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2372 list_del_init(&b->list); in bch_btree_set_root()
2373 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2375 b->c->root = b; in bch_btree_set_root()
2377 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2383 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_nodes_recurse() argument
2389 if (b->level) { in bch_btree_map_nodes_recurse()
2393 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2395 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2397 ret = btree(map_nodes_recurse, k, b, in bch_btree_map_nodes_recurse()
2406 if (!b->level || flags == MAP_ALL_NODES) in bch_btree_map_nodes_recurse()
2407 ret = fn(op, b); in bch_btree_map_nodes_recurse()
2418 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_keys_recurse() argument
2426 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2428 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2429 ret = !b->level in bch_btree_map_keys_recurse()
2430 ? fn(op, b, k) in bch_btree_map_keys_recurse()
2431 : btree(map_keys_recurse, k, b, op, from, fn, flags); in bch_btree_map_keys_recurse()
2438 if (!b->level && (flags & MAP_END_KEY)) in bch_btree_map_keys_recurse()
2439 ret = fn(op, b, &KEY(KEY_INODE(&b->key), in bch_btree_map_keys_recurse()
2440 KEY_OFFSET(&b->key), 0)); in bch_btree_map_keys_recurse()
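Both map functions walk the tree the same way: interior nodes recurse through the children found via the iterator, and fn is applied at the leaves (plus, with MAP_END_KEY, once at each leaf's end key). A generic runnable analogue over a toy tree:

#include <stdio.h>

struct tnode {
	int level;                 /* 0 = leaf */
	int nkeys;
	int keys[4];               /* leaf payload */
	struct tnode *child[4];    /* interior children */
};

typedef int (*key_fn)(int key);

/* Leaves feed fn; interior nodes recurse, like map_keys_recurse. */
static int map_keys(struct tnode *b, key_fn fn)
{
	for (int i = 0; i < b->nkeys; i++) {
		int ret = b->level ? map_keys(b->child[i], fn)
				   : fn(b->keys[i]);

		if (ret)
			return ret;    /* propagate early exit */
	}
	return 0;
}

static int print_key(int key) { printf("key %d\n", key); return 0; }

int main(void)
{
	struct tnode leaf1 = { 0, 2, { 1, 2 }, { 0 } };
	struct tnode leaf2 = { 0, 1, { 3 },    { 0 } };
	struct tnode root  = { 1, 2, { 0 }, { &leaf1, &leaf2 } };

	map_keys(&root, print_key);
	return 0;
}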
2477 static int refill_keybuf_fn(struct btree_op *op, struct btree *b, in refill_keybuf_fn() argument