// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */
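/*
 * A rough sketch of the generation scheme described above (illustrative
 * only, not code from this file): every bucket carries an 8 bit generation
 * counter, and every pointer embeds the generation it was created against.
 * A pointer is stale once the bucket has been reused, roughly what
 * ptr_stale(c, k, i) checks:
 *
 *	if (PTR_GEN(k, i) != PTR_BUCKET(c, k, i)->gen)
 *		;	// stale pointer; garbage collection will drop it
 *
 * Freeing a bucket is then just bch_inc_gen(), which invalidates every
 * outstanding pointer into it at once.
 */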

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72
#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

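/*
 * Identify a btree node by its first pointer: bucket number (the offset
 * shifted down by bucket_bits) combined with that pointer's generation, so
 * a stale pointer into a reused bucket won't match the node cached for it.
 */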
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

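/*
 * Whether a traversal takes this node's lock in write mode: op->lock
 * appears to hold the btree level at (and below) which we intend to modify
 * nodes, so deeper nodes get write locks and the rest are read-locked
 * (inferred from usage, not spelled out in the original).
 */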
#define insert_lock(s, b)	((b)->level <= (s)->lock)


static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);
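	/*
	 * The 8-byte offset skips the bset's own 64-bit csum field, so the
	 * checksum covers everything after it; seeding the crc with the
	 * node's first pointer keeps identical bsets in different buckets
	 * from checksumming the same.
	 */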

	crc = crc64_be(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->cache->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c->cache));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c->cache))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));
out:
	mempool_free(iter, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		struct bio_vec *bv;
		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, b->bio, iter_all) {
			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
			addr += PAGE_SIZE;
		}

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		/*
		 * No problem for multipage bvec since the bio is
		 * just allocated
		 */
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
			&b->c->cache->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c->cache));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	/*
	 * w->journal is always the oldest journal pin of all bkeys
	 * in the leaf node, to make sure the oldest jset seq won't
	 * be increased before this btree node is flushed.
	 */
	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

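/*
 * As a worked example of the reserve below: with a root at level 2 we keep
 * 2 * 8 + 16 = 32 nodes in reserve, and only what the cache holds beyond
 * that reserve is fair game for the shrinker (mca_can_free()).
 */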
#define mca_reserve(c)	(((!IS_ERR_OR_NULL(c->root) && c->root->level) \
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

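/*
 * Branchless three-way compare: evaluates to 1, 0 or -1 like memcmp(),
 * e.g. cmp_int(3, 7) == -1.
 */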
#define cmp_int(l, r)		((l > r) - (l < r))

#ifdef CONFIG_PROVE_LOCKING
static int btree_lock_cmp_fn(const struct lockdep_map *_a,
			     const struct lockdep_map *_b)
{
	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
	const struct btree *b = container_of(_b, struct btree, lock.dep_map);

	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
}

static void btree_lock_print_fn(const struct lockdep_map *map)
{
	const struct btree *b = container_of(map, struct btree, lock.dep_map);

	printk(KERN_CONT " l=%u %llu:%llu", b->level,
	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
}
#endif

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	/*
	 * kzalloc() is necessary here for initialization,
	 * see code comments in bch_btree_keys_init().
	 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry\n", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
		i++;
	}

	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		if (nr <= 0 || i >= btree_cache_used)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}

		nr--;
		i++;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		/*
		 * This function is called by cache_set_free(), no I/O
		 * request on cache now, it is unnecessary to acquire
		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP,
				 ilog2(meta_bucket_pages(&c->cache->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_reserve buckets
		 * allocated in previous for-loop, they will be
		 * handled properly in bch_cache_set_unregister().
		 */
		return -ENOMEM;
	}

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;

	if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
		pr_warn("bcache: %s: could not register shrinker\n",
				__func__);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (likely(c->btree_cache_alloc_lock == NULL)) {
		c->btree_cache_alloc_lock = current;
	} else if (c->btree_cache_alloc_lock != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock(&c->btree_cannibalize_lock);
		return -EINTR;
	}
	spin_unlock(&c->btree_cannibalize_lock);

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open-coded mutex to ensure that, which
 * mca_cannibalize_lock() takes. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
void bch_cannibalize_unlock(struct cache_set *c)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
	spin_unlock(&c->btree_cannibalize_lock);
}

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	b->parent = parent;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

retry:
	mutex_lock(&b->write_lock);
	/*
	 * If the btree node is selected and flushing in btree_flush_write(),
	 * delay and retry until the BTREE_NODE_journal_flush bit cleared,
	 * then it is safe to free the btree node here. Otherwise this btree
	 * node will be in race condition.
	 */
	if (btree_node_journal_flush(b)) {
		mutex_unlock(&b->write_lock);
		pr_debug("bnode %p journal_flush set, retry\n", b);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b)) {
		btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);
	}

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
retry:
	/* return ERR_PTR(-EAGAIN) when it fails */
	b = ERR_PTR(-EAGAIN);
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);

	if (!IS_ERR(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned int i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(b->c->cache,
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca = c->cache;
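	/*
	 * A split can allocate up to two new nodes per level between here
	 * and the root, plus one for a possible new root - hence, as far as
	 * we can tell, the two-per-level-plus-one reserve below.
	 */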
	unsigned int reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		mutex_unlock(&c->bucket_lock);
		return -EINTR;
	}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */

static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned int i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

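/* Convenience wrapper: mark a key at the level of the node that holds it. */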
#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    !ptr_stale(c, k, i)) {
			struct bucket *b = PTR_BUCKET(c, k, i);

			b->gen = PTR_GEN(k, i);

			if (level && bkey_cmp(k, &ZERO_KEY))
				b->prio = BTREE_PRIO;
			else if (!level && b->prio == BTREE_PRIO)
				b->prio = INITIAL_PRIO;
		}

	__bch_btree_mark_key(c, level, k);
}

void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
{
	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}

static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned int keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(&b->keys, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(&b->keys, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}

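/*
 * gc walks with a sliding window of up to four adjacent sibling nodes at a
 * time and tries to coalesce them into fewer nodes (see btree_gc_coalesce()
 * and its use in btree_gc_recurse() below).
 */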
#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned int	keys;
};

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key);

static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned int i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct keylist keylist;
	struct closure cl;
	struct bkey *k;

	bch_keylist_init(&keylist);

	if (btree_check_reserve(b, NULL))
		return 0;

	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->keys.set[0].data, keys,
			 block_bytes(b->c->cache)) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
		if (IS_ERR(new_nodes[i]))
			goto out_nocoalesce;
	}

	/*
	 * We have to check the reserve here, after we've allocated our new
	 * nodes, to make sure the insert below will succeed - we also check
	 * before as an optimization to potentially avoid a bunch of expensive
	 * allocs/sorts
	 */
	if (btree_check_reserve(b, NULL))
		goto out_nocoalesce;

	for (i = 0; i < nodes; i++)
		mutex_lock(&new_nodes[i]->write_lock);

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = btree_bset_first(new_nodes[i]);
		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i > 1) {
			for (k = n2->start;
			     k < bset_bkey_last(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k),
						 block_bytes(b->c->cache)) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}
		} else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 block_bytes(b->c->cache)) >
			    btree_blocks(new_nodes[i]))
				goto out_unlock_nocoalesce;

			keys = n2->keys;
			/* Take the key of the node we're getting rid of */
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
		       btree_blocks(new_nodes[i]));

		if (last)
			bkey_copy_key(&new_nodes[i]->key, last);

		memcpy(bset_bkey_last(n1),
		       n2->start,
		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);

		n1->keys += keys;
		r[i].keys = n1->keys;

		memmove(n2->start,
			bset_bkey_idx(n2, keys),
			(void *) bset_bkey_last(n2) -
			(void *) bset_bkey_idx(n2, keys));

		n2->keys -= keys;

		if (__bch_keylist_realloc(&keylist,
					  bkey_u64s(&new_nodes[i]->key)))
			goto out_unlock_nocoalesce;

		bch_btree_node_write(new_nodes[i], &cl);
		bch_keylist_add(&keylist, &new_nodes[i]->key);
	}

	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

	closure_sync(&cl);

	/* We emptied out this node */
	BUG_ON(btree_bset_first(new_nodes[0])->keys);
	btree_node_free(new_nodes[0]);
	rw_unlock(true, new_nodes[0]);
	new_nodes[0] = NULL;

	for (i = 0; i < nodes; i++) {
		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
			goto out_nocoalesce;

		make_btree_freeing_key(r[i].b, keylist.top);
		bch_keylist_push(&keylist);
	}

	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keylist));

	for (i = 0; i < nodes; i++) {
		btree_node_free(r[i].b);
		rw_unlock(true, r[i].b);

		r[i].b = new_nodes[i];
	}

	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
	r[nodes - 1].b = ERR_PTR(-EINTR);

	trace_bcache_btree_gc_coalesce(nodes);
	gc->nodes--;

	bch_keylist_free(&keylist);

	/* Invalidated our iterator */
	return -EINTR;

out_unlock_nocoalesce:
	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

out_nocoalesce:
	closure_sync(&cl);

	while ((k = bch_keylist_pop(&keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);
	bch_keylist_free(&keylist);

	for (i = 0; i < nodes; i++)
		if (!IS_ERR(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}
	return 0;
}

static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
				 struct btree *replace)
{
	struct keylist keys;
	struct btree *n;

	if (btree_check_reserve(b, NULL))
		return 0;

	n = btree_node_alloc_replacement(replace, NULL);

	/* recheck reserve after allocating replacement node */
	if (btree_check_reserve(b, NULL)) {
		btree_node_free(n);
		rw_unlock(true, n);
		return 0;
	}

	bch_btree_node_write_sync(n);

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, &n->key);

	make_btree_freeing_key(replace, keys.top);
	bch_keylist_push(&keys);

	bch_btree_insert_node(b, op, &keys, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keys));

	btree_node_free(replace);
	rw_unlock(true, n);

	/* Invalidated our iterator */
	return -EINTR;
}

static unsigned int btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned int ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}

static size_t btree_gc_min_nodes(struct cache_set *c)
{
	size_t min_nodes;

	/*
	 * Incremental GC pauses for 100ms whenever front-side I/O
	 * arrives. If GC processed only a constant number (100) of
	 * nodes per pass, a large btree would keep GC running for a
	 * long time, the front-side I/Os would exhaust the free
	 * buckets (no new bucket can be allocated during GC) and
	 * stall again. So instead of a constant batch size, scale
	 * the batch with the total number of btree nodes by aiming
	 * to finish GC in a constant number (MAX_GC_TIMES, 100) of
	 * passes: the more nodes there are, the more each pass
	 * processes, but never fewer than MIN_GC_NODES per pass.
	 */
	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
	if (min_nodes < MIN_GC_NODES)
		min_nodes = MIN_GC_NODES;

	return min_nodes;
}
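/*
 * For example: with 50,000 btree nodes and MAX_GC_TIMES of 100, each pass
 * covers at least 500 nodes; with only 2,000 nodes the MIN_GC_NODES floor
 * of 100 applies instead.
 */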


static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	int ret = 0;
	bool should_rewrite;
	struct bkey *k;
	struct btree_iter iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;

	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		i->b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
						  true, b);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite) {
				ret = btree_gc_rewrite_node(b, op, last->b);
				if (ret)
					break;
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			mutex_lock(&last->b->write_lock);
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			mutex_unlock(&last->b->write_lock);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (atomic_read(&b->c->search_inflight) &&
		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
			gc->nodes_pre = gc->nodes;
			ret = -EAGAIN;
			break;
		}

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		if (!IS_ERR_OR_NULL(i->b)) {
			mutex_lock(&i->b->write_lock);
			if (btree_node_dirty(i->b))
				bch_btree_node_write(i->b, writes);
			mutex_unlock(&i->b->write_lock);
			rw_unlock(true, i->b);
		}

	return ret;
}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, NULL);

		if (!IS_ERR(n)) {
			bch_btree_node_write_sync(n);

			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	__bch_btree_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	ca = c->cache;
	for_each_bucket(b, ca) {
		b->last_gc = b->gen;
		if (!atomic_read(&b->pin)) {
			SET_GC_MARK(b, 0);
			SET_GC_SECTORS_USED(b, 0);
		}
	}

	mutex_unlock(&c->bucket_lock);
}

static void bch_btree_gc_finish(struct cache_set *c)
{
	struct bucket *b;
	struct cache *ca;
	unsigned int i, j;
	uint64_t *k;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->devices_max_used; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	c->avail_nbuckets = 0;

	ca = c->cache;
	ca->invalidate_needs_gc = 0;

	for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);

	for (k = ca->prio_buckets;
	     k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);

	for_each_bucket(b, ca) {
		c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

		if (atomic_read(&b->pin))
			continue;

		BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));

		if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
			c->avail_nbuckets++;
	}

	mutex_unlock(&c->bucket_lock);
}

static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init(&op, SHRT_MAX);

	btree_gc_start(c);

	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
	do {
		ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);
		cond_resched();

		if (ret == -EAGAIN)
			schedule_timeout_interruptible(msecs_to_jiffies
						       (GC_SLEEP_MS));
		else if (ret)
			pr_warn("gc failed!\n");
	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data	<<= 9;
	bch_update_bucket_in_use(c, &stats);
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}

static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca = c->cache;

	if (ca->invalidate_needs_gc)
		return true;

	if (atomic_read(&c->sectors_to_gc) < 0)
		return true;

	return false;
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (1) {
		wait_event_interruptible(c->gc_wait,
			   kthread_should_stop() ||
			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
			   gc_should_run(c));

		if (kthread_should_stop() ||
		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
			break;

		set_gc_sectors(c);
		bch_btree_gc(c);
	}

	wait_for_kthread_stop();
	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
	return PTR_ERR_OR_ZERO(c->gc_thread);
}

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
	int ret = 0;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(b->c, b->level, k);

	bch_initial_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		bch_btree_iter_init(&b->keys, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad);
			if (k) {
				btree_node_prefetch(b, k);
1915 				/*
1916 				 * initiallize c->gc_stats.nodes
1917 				 * for incremental GC
1918 				 */
1919 				b->c->gc_stats.nodes++;
1920 			}
1921 
1922 			if (p)
1923 				ret = bcache_btree(check_recurse, p, b, op);
1924 
1925 			p = k;
1926 		} while (p && !ret);
1927 	}
1928 
1929 	return ret;
1930 }
1931 
1932 
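/*
 * Worker for the multi-threaded initial btree check. Keys in the root
 * node are handed out by index: each worker atomically claims the next
 * unclaimed index under idx_lock, advances its private iterator to that
 * key, and checks the subtree below it. When a worker finds no root
 * keys left to claim, it sets check_state->enough so that no further
 * workers are created.
 */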
static int bch_btree_check_thread(void *arg)
{
	int ret;
	struct btree_check_info *info = arg;
	struct btree_check_state *check_state = info->state;
	struct cache_set *c = check_state->c;
	struct btree_iter iter;
	struct bkey *k, *p;
	int cur_idx, prev_idx, skip_nr;

	k = p = NULL;
	cur_idx = prev_idx = 0;
	ret = 0;

	/* root node keys are checked before threads are created */
	bch_btree_iter_init(&c->root->keys, &iter, NULL);
	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
	BUG_ON(!k);

	p = k;
	while (k) {
		/*
		 * Fetch a root node key index, skip the keys which
		 * should be fetched by other threads, then check the
		 * sub-tree indexed by the fetched key.
		 */
		spin_lock(&check_state->idx_lock);
		cur_idx = check_state->key_idx;
		check_state->key_idx++;
		spin_unlock(&check_state->idx_lock);

		skip_nr = cur_idx - prev_idx;

		while (skip_nr) {
			k = bch_btree_iter_next_filter(&iter,
						       &c->root->keys,
						       bch_ptr_bad);
			if (k)
				p = k;
			else {
				/*
				 * No more keys to check in root node,
				 * current checking threads are enough,
				 * stop creating more.
				 */
				atomic_set(&check_state->enough, 1);
				/* Update check_state->enough earlier */
				smp_mb__after_atomic();
				goto out;
			}
			skip_nr--;
			cond_resched();
		}

		if (p) {
			struct btree_op op;

			btree_node_prefetch(c->root, p);
			c->gc_stats.nodes++;
			bch_btree_op_init(&op, 0);
			ret = bcache_btree(check_recurse, p, c->root, &op);
			/*
			 * The op may have been added to cache_set's
			 * btree_cache_wait in mca_cannibalize(); we must
			 * ensure it is removed from that list and that
			 * btree_cache_alloc_lock is released before the op
			 * memory is freed. Otherwise btree_cache_wait will
			 * be corrupted.
			 */
			bch_cannibalize_unlock(c);
			finish_wait(&c->btree_cache_wait, &op.wait);
			if (ret)
				goto out;
		}
		p = NULL;
		prev_idx = cur_idx;
		cond_resched();
	}

out:
	info->result = ret;
	/* update check_state->started among all CPUs */
	smp_mb__before_atomic();
	if (atomic_dec_and_test(&check_state->started))
		wake_up(&check_state->wait);

	return ret;
}

static int bch_btree_chkthread_nr(void)
{
	int n = num_online_cpus() / 2;

	if (n == 0)
		n = 1;
	else if (n > BCH_BTR_CHKTHREAD_MAX)
		n = BCH_BTR_CHKTHREAD_MAX;

	return n;
}

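/*
 * Check and mark every key in the btree after an unclean shutdown. The
 * root node is handled here on the caller's thread; if the tree has any
 * depth, up to bch_btree_chkthread_nr() workers then check the subtrees
 * under the root node's keys in parallel.
 */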
int bch_btree_check(struct cache_set *c)
{
	int ret = 0;
	int i;
	struct bkey *k = NULL;
	struct btree_iter iter;
	struct btree_check_state check_state;

	/* check and mark root node keys */
	for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(c, c->root->level, k);

	bch_initial_mark_key(c, c->root->level + 1, &c->root->key);

	if (c->root->level == 0)
		return 0;

	memset(&check_state, 0, sizeof(struct btree_check_state));
	check_state.c = c;
	check_state.total_threads = bch_btree_chkthread_nr();
	check_state.key_idx = 0;
	spin_lock_init(&check_state.idx_lock);
	atomic_set(&check_state.started, 0);
	atomic_set(&check_state.enough, 0);
	init_waitqueue_head(&check_state.wait);

	rw_lock(0, c->root, c->root->level);
	/*
	 * Run multiple threads to check btree nodes in parallel;
	 * if check_state.enough is non-zero, the check threads
	 * already running are enough and it is unnecessary to
	 * create more.
	 */
	for (i = 0; i < check_state.total_threads; i++) {
		/* fetch latest check_state.enough earlier */
		smp_mb__before_atomic();
		if (atomic_read(&check_state.enough))
			break;

		check_state.infos[i].result = 0;
		check_state.infos[i].state = &check_state;

		check_state.infos[i].thread =
			kthread_run(bch_btree_check_thread,
				    &check_state.infos[i],
				    "bch_btrchk[%d]", i);
		if (IS_ERR(check_state.infos[i].thread)) {
			pr_err("failed to run thread bch_btrchk[%d]\n", i);
			for (--i; i >= 0; i--)
				kthread_stop(check_state.infos[i].thread);
			ret = -ENOMEM;
			goto out;
		}
		atomic_inc(&check_state.started);
	}

	/*
	 * Must wait for all threads to stop.
	 */
	wait_event(check_state.wait, atomic_read(&check_state.started) == 0);

	for (i = 0; i < check_state.total_threads; i++) {
		if (check_state.infos[i].result) {
			ret = check_state.infos[i].result;
			goto out;
		}
	}

out:
	rw_unlock(0, c->root);
	return ret;
}

void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca = c->cache;
	struct bucket *b;

	bch_btree_gc_finish(c);

	mutex_lock(&c->bucket_lock);

	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets in
	 * order to rewrite the prios and gens, and it needs to rewrite prios
	 * and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
	for_each_bucket(b, ca) {
		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
		    fifo_full(&ca->free[RESERVE_BTREE]))
			break;

		if (bch_can_invalidate_bucket(ca, b) &&
		    !GC_MARK(b)) {
			__bch_invalidate_one_bucket(ca, b);
			if (!fifo_push(&ca->free[RESERVE_PRIO],
				       b - ca->buckets))
				fifo_push(&ca->free[RESERVE_BTREE],
					  b - ca->buckets);
		}
	}

	mutex_unlock(&c->bucket_lock);
}

/* Btree insertion */

static bool btree_insert_key(struct btree *b, struct bkey *k,
			     struct bkey *replace_key)
{
	unsigned int status;

	BUG_ON(bkey_cmp(k, &b->key) > 0);

	status = bch_btree_insert_key(&b->keys, k, replace_key);
	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
		bch_check_keys(&b->keys, "%u for %s", status,
			       replace_key ? "replace" : "insert");

		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
					      status);
		return true;
	}

	return false;
}

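/*
 * How much room is left for inserts, in u64s. If this node stores
 * extents, an insert can land in the middle of an existing extent and
 * force it to be split, so reserve room for one extra maximally-sized
 * key.
 */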
static size_t insert_u64s_remaining(struct btree *b)
{
	long ret = bch_btree_keys_u64s_remaining(&b->keys);

	/*
	 * Might land in the middle of an existing extent and have to split it
	 */
	if (b->keys.ops->is_extents)
		ret -= KEY_MAX_U64S;

	return max(ret, 0L);
}

static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
				  struct keylist *insert_keys,
				  struct bkey *replace_key)
{
	bool ret = false;
	int oldsize = bch_count_data(&b->keys);

	while (!bch_keylist_empty(insert_keys)) {
		struct bkey *k = insert_keys->keys;

		if (bkey_u64s(k) > insert_u64s_remaining(b))
			break;

		if (bkey_cmp(k, &b->key) <= 0) {
			if (!b->level)
				bkey_put(b->c, k);

			ret |= btree_insert_key(b, k, replace_key);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
			BKEY_PADDED(key) temp;
			bkey_copy(&temp.key, insert_keys->keys);

			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

			ret |= btree_insert_key(b, &temp.key, replace_key);
			break;
		} else {
			break;
		}
	}

	if (!ret)
		op->insert_collision = true;

	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

	BUG_ON(bch_count_data(&b->keys) < oldsize);
	return ret;
}

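/*
 * Rewrite @b, splitting it if necessary. The node is always rewritten
 * into n1; if it is more than ~80% full we also allocate n2 and move
 * roughly the last 40% of its keys there, and if @b was the root we
 * allocate n3 to become the new (deeper) root. Keys pointing at the
 * new node(s) are then inserted into the parent.
 */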
static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();
	struct closure cl;
	struct keylist parent_keys;

	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

	if (btree_check_reserve(b, op)) {
		if (!b->level)
			return -EINTR;
		else
			WARN(1, "insufficient reserve for split\n");
	}

	n1 = btree_node_alloc_replacement(b, op);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(btree_bset_first(n1),
			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned int keys = 0;

		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);

		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
			if (IS_ERR(n3))
				goto err_free2;
		}

		mutex_lock(&n1->write_lock);
		mutex_lock(&n2->write_lock);

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
							keys));

		bkey_copy_key(&n1->key,
			      bset_bkey_idx(btree_bset_first(n1), keys));
		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));

		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
		btree_bset_first(n1)->keys = keys;

		memcpy(btree_bset_first(n2)->start,
		       bset_bkey_last(btree_bset_first(n1)),
		       btree_bset_first(n2)->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(&parent_keys, &n2->key);
		bch_btree_node_write(n2, &cl);
		mutex_unlock(&n2->write_lock);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);

		mutex_lock(&n1->write_lock);
		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(&parent_keys, &n1->key);
	bch_btree_node_write(n1, &cl);
	mutex_unlock(&n1->write_lock);

	if (n3) {
		/* Depth increases, make a new root */
		mutex_lock(&n3->write_lock);
		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
		bch_btree_node_write(n3, &cl);
		mutex_unlock(&n3->write_lock);

		closure_sync(&cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */
		closure_sync(&cl);
		bch_btree_set_root(n1);
	} else {
		/* Split a non root node */
		closure_sync(&cl);
		make_btree_freeing_key(b, parent_keys.top);
		bch_keylist_push(&parent_keys);

		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
		BUG_ON(!bch_keylist_empty(&parent_keys));
	}

	btree_node_free(b);
	rw_unlock(true, n1);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	bkey_put(b->c, &n2->key);
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	bkey_put(b->c, &n1->key);
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	WARN(1, "bcache: btree split failed (level %u)", b->level);

	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	return -ENOMEM;
}

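/*
 * Insert keys into @b, splitting it if there isn't room. Returns 0 on
 * success; -EAGAIN if splitting now could deadlock against writes we
 * have queued ourselves (current->bio_list is non-NULL); -EINTR if the
 * caller must retry from the root with a bigger lock (op->lock has
 * been raised accordingly).
 */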
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}

int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

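/*
 * Insert the keys on @keys, walking leaf nodes from the list's start
 * key until the list is drained. A sketch of typical use (key setup
 * and error handling elided):
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	... append the keys to insert ...
 *	ret = bch_btree_insert(c, &keys, journal_ref, NULL);
 *
 * Returns -ESRCH on an insert collision, e.g. a compare-and-swap style
 * insert where replace_key was not found.
 */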
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys		= keys;
	op.journal_ref	= journal_ref;
	op.replace_key	= replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i\n", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

void bch_btree_set_root(struct btree *b)
{
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

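/*
 * The map functions call @fn on every node (or key) in order, starting
 * from @from. @fn returns MAP_CONTINUE to keep going or MAP_DONE to
 * stop early; any other return value aborts the walk and is passed
 * back to the caller.
 */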
static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = bcache_btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
			       struct bkey *from, btree_map_keys_fn *fn,
			       int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: bcache_btree(map_keys_recurse, k,
				       b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

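/*
 * A keybuf is an rbtree of keys harvested from the btree by a predicate
 * (writeback uses one to track dirty keys). Since keybuf_cmp() treats
 * overlapping keys as equal, the tree can never hold two overlapping
 * keys at once.
 */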
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned int	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) > 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

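/*
 * Scan the btree from buf->last_scanned towards @end, adding keys that
 * match @pred until the buffer's freelist runs dry, then record the
 * range now covered in buf->start and buf->end.
 */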
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

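/*
 * Return the first key in the buffer that no caller has claimed yet;
 * w->private doubles as the claim marker and is set to ERR_PTR(-EINTR)
 * here to take ownership.
 */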
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

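/*
 * Like bch_keybuf_next(), but refills the buffer from the btree when it
 * runs dry, stopping once the scan cursor reaches @end.
 */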
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished\n");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}

void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
}

int __init bch_btree_init(void)
{
	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
	if (!btree_io_wq)
		return -ENOMEM;

	return 0;
}