// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
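/*
 * The cutoffs are percentages of cache in use: above CUTOFF_CACHE_ADD new
 * data is no longer added to the cache, and above CUTOFF_CACHE_READA
 * readahead is no longer cached (see check_should_bypass() and
 * cached_dev_cache_miss()).
 */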

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *cl);

static unsigned int cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;

		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

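	/*
	 * The checksum lives in the u64 slot just past the last pointer;
	 * the top bit is cleared, presumably to keep the stored value
	 * within the on-disk field's range (an assumption - see the key
	 * layout in bcache.h).
	 */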
	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

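/*
 * The insert path is a small state machine driven by continue_at():
 * bch_data_insert() -> bch_data_insert_start() writes the data and builds up
 * keys, then bch_data_insert_keys() journals and inserts them into the btree;
 * if the keylist filled up before all the data was written, control bounces
 * back to bch_data_insert_start() for another pass.
 */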
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, we might already be waiting on another journal
	 * write - we can't wait on more than one journal write at a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status = BLK_STS_RESOURCE;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bch_data_insert_keys() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

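/*
 * For a bypass write we don't cache the data, but any stale data cached for
 * the same region must not survive: insert pointerless keys (KEY() with no
 * pointers added) that overwrite, and thus invalidate, whatever was cached
 * there. Each such key needs only 2 u64s in the keylist.
 */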
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned int sectors = min(bio_sectors(bio),
					   1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode,
				     bio->bi_iter.bi_sector,
				     sectors));
	}

	op->insert_data_done = true;
	/* matches the bio_get() in bch_data_insert() */
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

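	/*
	 * bch_alloc_sectors() may give us fewer sectors than the bio covers;
	 * split off what was allocated, submit it, and loop until the whole
	 * bio has been written (n == bio means the last split consumed it).
	 */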
	do {
		unsigned int i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		/*
		 * 2 u64s for the key header, 1 for the device pointer and
		 * 1 for the checksum
		 */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if op->writeback is true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take awhile and
	 * we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

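/*
 * Returns a congestion threshold in sectors: I/Os at least this sequential
 * get bypassed while the cache is congested. Congestion decays over time
 * (roughly one point per millisecond since the last congested I/O), is
 * scaled through fract_exp_two(), and the popcount of a random long is
 * subtracted so the cutoff jitters rather than being a hard edge.
 */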
unsigned int bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

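/*
 * Sequential I/O detection: dc->io_hash maps the last sector of recent I/Os
 * to struct io entries kept on an LRU. If this bio starts where a recent one
 * ended, the accumulated i->sequential grows; once the running total (or the
 * task's EWMA of it) crosses dc->sequential_cutoff, the I/O bypasses the
 * cache. Congestion lowers the effective cutoff further.
 */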
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned int mode = cache_mode(dc);
	unsigned int sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Flag for bypass if the IO is for read-ahead or background,
	 * unless the read-ahead request is for metadata (eg, for gfs2).
	 */
	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
	    !(bio->bi_opf & REQ_META))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned int		insert_bio_sectors;
	unsigned int		recoverable:1;
	unsigned int		write:1;
	unsigned int		read_dirty_data:1;
	unsigned int		cache_missed:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status, but don't pass the error to
	 * bch_bbio_endio() so it doesn't get counted against the cache
	 * device; we'll still reread the data from the backing device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned int ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned int bio_sectors = bio_sectors(bio);
		unsigned int sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;
		int ret = s->d->cache_miss(b, s, bio, sectors);

		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * We might encounter an error while searching the btree; if that
	 * happens we get a negative ret. In this scenario we should not
	 * recover data from the backing device (when the cache device is
	 * dirty), because we don't know whether the bkeys the read request
	 * covered are all clean.
	 *
	 * If that happens, s->iop.status is still the initial value it had
	 * before we submitted s->bio.bio.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);

		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void backing_request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);
		/*
		 * If a bio has REQ_PREFLUSH in writeback mode, it was
		 * specially assembled in cached_dev_write() for a
		 * non-zero-size write request that has REQ_PREFLUSH. We
		 * don't set s->iop.status from this failure; the status will
		 * be decided by the result of the bch_data_insert()
		 * operation.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %s: returned bi_status %i",
			       dc->backing_dev_name, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}
		s->recoverable = false;
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	/*
	 * bi_end_io can be set separately somewhere else, e.g. the
	 * variants in,
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io = end_io_fn;
	bio->bi_private = &s->cl;

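	/*
	 * Take three references up front: the clone may be submitted more
	 * than once (a cache miss plus a retry against the backing device
	 * reuses it via do_bio_hook()), and each completion path does a
	 * bio_put(). Three appears to cover the worst case; this is an
	 * observation from the call paths, not documented anywhere.
	 */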
	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	atomic_dec(&s->d->c->search_inflight);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, &s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->orig_bio = bio;
	s->cache_miss = NULL;
	s->cache_missed = 0;
	s->d = d;
	s->recoverable = 1;
	s->write = op_is_write(bio_op(bio));
	s->read_dirty_data = 0;
	s->start_time = jiffies;

	s->iop.c = d->c;
	s->iop.bio = NULL;
	s->iop.inode = d->id;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->iop.write_prio = 0;
	s->iop.status = 0;
	s->iop.flags = 0;
	s->iop.flush_journal = op_is_flush(bio->bi_opf);
	s->iop.wq = bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

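/*
 * The read path, in order: cached_dev_read() kicks off cache_lookup(); when
 * the lookup closure finishes, cached_dev_read_done_bh() dispatches to
 * cached_dev_read_error() (retry from the backing device if recoverable),
 * cached_dev_read_done() (copy bounce buffers back and insert the missed
 * data into the cache), or straight to cached_dev_bio_complete().
 */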
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering a failed read from the cache device may return
	 * stale data. So read failure recovery is only permitted when the
	 * read request hit clean data in the cache device, or when a cache
	 * read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{
	int ret = MAP_CONTINUE;
	unsigned int reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = backing_request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = backing_request_endio;
	miss->bi_private = &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

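/*
 * A write goes down one of three paths:
 *  - bypass: the data goes only to the backing device, and the matching
 *    region of the cache is invalidated;
 *  - writeback: the data goes only to the cache (marked dirty), and is
 *    flushed to the backing device later by the writeback thread;
 *  - writethrough: the data goes to the backing device, and a clone of the
 *    bio is inserted into the cache as clean data.
 */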
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

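/*
 * Detached path: when the backing device isn't attached to a cache set (or
 * cached_dev_get() fails), bios are passed straight through to the backing
 * device. The private struct below stashes the original endio/private so we
 * can still do I/O accounting and backing-device error counting on
 * completion.
 */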
struct detached_dev_io_private {
	struct bcache_device	*d;
	unsigned long		start_time;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
};

static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
			    &ddip->d->disk->part0, ddip->start_time);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}

static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * no need to call closure_get(&dc->disk.cl),
	 * because upper layer had already opened bcache device,
	 * which would call closure_get(&dc->disk.cl)
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	ddip->d = d;
	ddip->start_time = jiffies;
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		bio->bi_end_io(bio);
	else
		generic_make_request(bio);
}

static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * The mutex bch_register_lock may be contended by other parallel
	 * requesters, or by attach/detach operations on other backing
	 * devices. Waiting for the mutex lock may increase I/O request
	 * latency for seconds or more. To avoid such a situation, if
	 * mutex_trylock() fails, only the writeback rate of the current
	 * cached device is set to 1, and __update_writeback_rate() will
	 * decide the writeback rate of the other cached devices (remember
	 * c->idle_counter is 0 already).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (!c->devices[i])
				continue;

			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/*
			 * set writeback rate to default minimum value,
			 * then let update_writeback_rate() decide the
			 * upcoming rate.
			 */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (likely(d->c)) {
		if (atomic_read(&d->c->idle_counter))
			atomic_set(&d->c->idle_counter, 0);
		/*
		 * If at_max_writeback_rate of cache set is true and new I/O
		 * comes, quit max writeback rate of all cached devices
		 * attached to this cache set, and set at_max_writeback_rate
		 * to false.
		 */
		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
			atomic_set(&d->c->at_max_writeback_rate, 0);
			quit_max_writeback_rate(d->c, dc);
		}
	}

	generic_start_io_acct(q,
			      bio_op(bio),
			      bio_sectors(bio),
			      &d->disk->part0);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio);

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned int i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

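/*
 * Flash-only volumes have no backing device, so a "cache miss" simply means
 * a hole in the keyspace: the missing range is zero-filled rather than read
 * from anywhere.
 */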
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned int i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}