1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * bcache setup/teardown code, and some metadata io - read a superblock and
4 * figure out what to do with it.
5 *
6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7 * Copyright 2012 Google, Inc.
8 */
9
10 #include "bcache.h"
11 #include "btree.h"
12 #include "debug.h"
13 #include "extents.h"
14 #include "request.h"
15 #include "writeback.h"
16
17 #include <linux/blkdev.h>
18 #include <linux/buffer_head.h>
19 #include <linux/debugfs.h>
20 #include <linux/genhd.h>
21 #include <linux/idr.h>
22 #include <linux/kthread.h>
23 #include <linux/module.h>
24 #include <linux/random.h>
25 #include <linux/reboot.h>
26 #include <linux/sysfs.h>
27
28 MODULE_LICENSE("GPL");
29 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
30
31 static const char bcache_magic[] = {
32 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
33 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
34 };
35
36 static const char invalid_uuid[] = {
37 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
38 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
39 };
40
41 static struct kobject *bcache_kobj;
42 struct mutex bch_register_lock;
43 LIST_HEAD(bch_cache_sets);
44 static LIST_HEAD(uncached_devices);
45
46 static int bcache_major;
47 static DEFINE_IDA(bcache_device_idx);
48 static wait_queue_head_t unregister_wait;
49 struct workqueue_struct *bcache_wq;
50 struct workqueue_struct *bch_journal_wq;
51
52 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
53 /* limit on the number of partitions on a single bcache device */
54 #define BCACHE_MINORS 128
55 /* limit on the number of bcache devices on a single system */
56 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS)
57
58 /* Superblock */
59
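/*
 * Read the superblock from the start of @bdev into @sb, converting fields
 * from their on-disk little-endian layout and validating them. Returns NULL
 * on success and an error string on failure; on success the page holding
 * the on-disk superblock is returned via @res with an extra reference held.
 */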
60 static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
61 struct page **res)
62 {
63 const char *err;
64 struct cache_sb *s;
65 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
66 unsigned int i;
67
68 if (!bh)
69 return "IO error";
70
71 s = (struct cache_sb *) bh->b_data;
72
73 sb->offset = le64_to_cpu(s->offset);
74 sb->version = le64_to_cpu(s->version);
75
76 memcpy(sb->magic, s->magic, 16);
77 memcpy(sb->uuid, s->uuid, 16);
78 memcpy(sb->set_uuid, s->set_uuid, 16);
79 memcpy(sb->label, s->label, SB_LABEL_SIZE);
80
81 sb->flags = le64_to_cpu(s->flags);
82 sb->seq = le64_to_cpu(s->seq);
83 sb->last_mount = le32_to_cpu(s->last_mount);
84 sb->first_bucket = le16_to_cpu(s->first_bucket);
85 sb->keys = le16_to_cpu(s->keys);
86
87 for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
88 sb->d[i] = le64_to_cpu(s->d[i]);
89
90 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
91 sb->version, sb->flags, sb->seq, sb->keys);
92
93 err = "Not a bcache superblock";
94 if (sb->offset != SB_SECTOR)
95 goto err;
96
97 if (memcmp(sb->magic, bcache_magic, 16))
98 goto err;
99
100 err = "Too many journal buckets";
101 if (sb->keys > SB_JOURNAL_BUCKETS)
102 goto err;
103
104 err = "Bad checksum";
105 if (s->csum != csum_set(s))
106 goto err;
107
108 err = "Bad UUID";
109 if (bch_is_zero(sb->uuid, 16))
110 goto err;
111
112 sb->block_size = le16_to_cpu(s->block_size);
113
114 err = "Superblock block size smaller than device block size";
115 if (sb->block_size << 9 < bdev_logical_block_size(bdev))
116 goto err;
117
118 switch (sb->version) {
119 case BCACHE_SB_VERSION_BDEV:
120 sb->data_offset = BDEV_DATA_START_DEFAULT;
121 break;
122 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
123 sb->data_offset = le64_to_cpu(s->data_offset);
124
125 err = "Bad data offset";
126 if (sb->data_offset < BDEV_DATA_START_DEFAULT)
127 goto err;
128
129 break;
130 case BCACHE_SB_VERSION_CDEV:
131 case BCACHE_SB_VERSION_CDEV_WITH_UUID:
132 sb->nbuckets = le64_to_cpu(s->nbuckets);
133 sb->bucket_size = le16_to_cpu(s->bucket_size);
134
135 sb->nr_in_set = le16_to_cpu(s->nr_in_set);
136 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
137
138 err = "Too many buckets";
139 if (sb->nbuckets > LONG_MAX)
140 goto err;
141
142 err = "Not enough buckets";
143 if (sb->nbuckets < 1 << 7)
144 goto err;
145
146 err = "Bad block/bucket size";
147 if (!is_power_of_2(sb->block_size) ||
148 sb->block_size > PAGE_SECTORS ||
149 !is_power_of_2(sb->bucket_size) ||
150 sb->bucket_size < PAGE_SECTORS)
151 goto err;
152
153 err = "Invalid superblock: device too small";
154 if (get_capacity(bdev->bd_disk) <
155 sb->bucket_size * sb->nbuckets)
156 goto err;
157
158 err = "Bad UUID";
159 if (bch_is_zero(sb->set_uuid, 16))
160 goto err;
161
162 err = "Bad cache device number in set";
163 if (!sb->nr_in_set ||
164 sb->nr_in_set <= sb->nr_this_dev ||
165 sb->nr_in_set > MAX_CACHES_PER_SET)
166 goto err;
167
168 err = "Journal buckets not sequential";
169 for (i = 0; i < sb->keys; i++)
170 if (sb->d[i] != sb->first_bucket + i)
171 goto err;
172
173 err = "Too many journal buckets";
174 if (sb->first_bucket + sb->keys > sb->nbuckets)
175 goto err;
176
177 err = "Invalid superblock: first bucket comes before end of super";
178 if (sb->first_bucket * sb->bucket_size < 16)
179 goto err;
180
181 break;
182 default:
183 err = "Unsupported superblock version";
184 goto err;
185 }
186
187 sb->last_mount = (u32)ktime_get_real_seconds();
188 err = NULL;
189
190 get_page(bh->b_page);
191 *res = bh->b_page;
192 err:
193 put_bh(bh);
194 return err;
195 }
196
197 static void write_bdev_super_endio(struct bio *bio)
198 {
199 struct cached_dev *dc = bio->bi_private;
200 /* XXX: error checking */
201
202 closure_put(&dc->sb_write);
203 }
204
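/*
 * Encode @sb into the first page of @bio and submit it as a synchronous
 * REQ_META write to SB_SECTOR. Callers take a closure reference that is
 * dropped in the bio's end_io handler.
 */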
205 static void __write_super(struct cache_sb *sb, struct bio *bio)
206 {
207 struct cache_sb *out = page_address(bio_first_page_all(bio));
208 unsigned int i;
209
210 bio->bi_iter.bi_sector = SB_SECTOR;
211 bio->bi_iter.bi_size = SB_SIZE;
212 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
213 bch_bio_map(bio, NULL);
214
215 out->offset = cpu_to_le64(sb->offset);
216 out->version = cpu_to_le64(sb->version);
217
218 memcpy(out->uuid, sb->uuid, 16);
219 memcpy(out->set_uuid, sb->set_uuid, 16);
220 memcpy(out->label, sb->label, SB_LABEL_SIZE);
221
222 out->flags = cpu_to_le64(sb->flags);
223 out->seq = cpu_to_le64(sb->seq);
224
225 out->last_mount = cpu_to_le32(sb->last_mount);
226 out->first_bucket = cpu_to_le16(sb->first_bucket);
227 out->keys = cpu_to_le16(sb->keys);
228
229 for (i = 0; i < sb->keys; i++)
230 out->d[i] = cpu_to_le64(sb->d[i]);
231
232 out->csum = csum_set(out);
233
234 pr_debug("ver %llu, flags %llu, seq %llu",
235 sb->version, sb->flags, sb->seq);
236
237 submit_bio(bio);
238 }
239
240 static void bch_write_bdev_super_unlock(struct closure *cl)
241 {
242 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
243
244 up(&dc->sb_write_mutex);
245 }
246
247 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
248 {
249 struct closure *cl = &dc->sb_write;
250 struct bio *bio = &dc->sb_bio;
251
252 down(&dc->sb_write_mutex);
253 closure_init(cl, parent);
254
255 bio_reset(bio);
256 bio_set_dev(bio, dc->bdev);
257 bio->bi_end_io = write_bdev_super_endio;
258 bio->bi_private = dc;
259
260 closure_get(cl);
261 /* I/O request sent to backing device */
262 __write_super(&dc->sb, bio);
263
264 closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
265 }
266
267 static void write_super_endio(struct bio *bio)
268 {
269 struct cache *ca = bio->bi_private;
270
271 /* is_read = 0 */
272 bch_count_io_errors(ca, bio->bi_status, 0,
273 "writing superblock");
274 closure_put(&ca->set->sb_write);
275 }
276
277 static void bcache_write_super_unlock(struct closure *cl)
278 {
279 struct cache_set *c = container_of(cl, struct cache_set, sb_write);
280
281 up(&c->sb_write_mutex);
282 }
283
284 void bcache_write_super(struct cache_set *c)
285 {
286 struct closure *cl = &c->sb_write;
287 struct cache *ca;
288 unsigned int i;
289
290 down(&c->sb_write_mutex);
291 closure_init(cl, &c->cl);
292
293 c->sb.seq++;
294
295 for_each_cache(ca, c, i) {
296 struct bio *bio = &ca->sb_bio;
297
298 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
299 ca->sb.seq = c->sb.seq;
300 ca->sb.last_mount = c->sb.last_mount;
301
302 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
303
304 bio_reset(bio);
305 bio_set_dev(bio, ca->bdev);
306 bio->bi_end_io = write_super_endio;
307 bio->bi_private = ca;
308
309 closure_get(cl);
310 __write_super(&ca->sb, bio);
311 }
312
313 closure_return_with_destructor(cl, bcache_write_super_unlock);
314 }
315
316 /* UUID io */
317
318 static void uuid_endio(struct bio *bio)
319 {
320 struct closure *cl = bio->bi_private;
321 struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
322
323 cache_set_err_on(bio->bi_status, c, "accessing uuids");
324 bch_bbio_free(bio, c);
325 closure_put(cl);
326 }
327
328 static void uuid_io_unlock(struct closure *cl)
329 {
330 struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
331
332 up(&c->uuid_write_mutex);
333 }
334
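/*
 * Read or write the uuid_entry array stored in the bucket(s) that @k points
 * to. For writes one bio is submitted per pointer in @k; for reads only the
 * first pointer is used. Serialized by c->uuid_write_mutex.
 */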
335 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
336 struct bkey *k, struct closure *parent)
337 {
338 struct closure *cl = &c->uuid_write;
339 struct uuid_entry *u;
340 unsigned int i;
341 char buf[80];
342
343 BUG_ON(!parent);
344 down(&c->uuid_write_mutex);
345 closure_init(cl, parent);
346
347 for (i = 0; i < KEY_PTRS(k); i++) {
348 struct bio *bio = bch_bbio_alloc(c);
349
350 bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
351 bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
352
353 bio->bi_end_io = uuid_endio;
354 bio->bi_private = cl;
355 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
356 bch_bio_map(bio, c->uuids);
357
358 bch_submit_bbio(bio, c, k, i);
359
360 if (op != REQ_OP_WRITE)
361 break;
362 }
363
364 bch_extent_to_text(buf, sizeof(buf), k);
365 pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
366
367 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
368 if (!bch_is_zero(u->uuid, 16))
369 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
370 u - c->uuids, u->uuid, u->label,
371 u->first_reg, u->last_reg, u->invalidated);
372
373 closure_return_with_destructor(cl, uuid_io_unlock);
374 }
375
376 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
377 {
378 struct bkey *k = &j->uuid_bucket;
379
380 if (__bch_btree_ptr_invalid(c, k))
381 return "bad uuid pointer";
382
383 bkey_copy(&c->uuid_bucket, k);
384 uuid_io(c, REQ_OP_READ, 0, k, cl);
385
386 if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
387 struct uuid_entry_v0 *u0 = (void *) c->uuids;
388 struct uuid_entry *u1 = (void *) c->uuids;
389 int i;
390
391 closure_sync(cl);
392
393 /*
394 * Since the new uuid entry is bigger than the old, we have to
395 * convert starting at the highest memory address and work down
396 * in order to do it in place
397 */
398
399 for (i = c->nr_uuids - 1;
400 i >= 0;
401 --i) {
402 memcpy(u1[i].uuid, u0[i].uuid, 16);
403 memcpy(u1[i].label, u0[i].label, 32);
404
405 u1[i].first_reg = u0[i].first_reg;
406 u1[i].last_reg = u0[i].last_reg;
407 u1[i].invalidated = u0[i].invalidated;
408
409 u1[i].flags = 0;
410 u1[i].sectors = 0;
411 }
412 }
413
414 return NULL;
415 }
416
417 static int __uuid_write(struct cache_set *c)
418 {
419 BKEY_PADDED(key) k;
420 struct closure cl;
421
422 closure_init_stack(&cl);
423 lockdep_assert_held(&bch_register_lock);
424
425 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
426 return 1;
427
428 SET_KEY_SIZE(&k.key, c->sb.bucket_size);
429 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
430 closure_sync(&cl);
431
432 bkey_copy(&c->uuid_bucket, &k.key);
433 bkey_put(c, &k.key);
434 return 0;
435 }
436
437 int bch_uuid_write(struct cache_set *c)
438 {
439 int ret = __uuid_write(c);
440
441 if (!ret)
442 bch_journal_meta(c, NULL);
443
444 return ret;
445 }
446
447 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
448 {
449 struct uuid_entry *u;
450
451 for (u = c->uuids;
452 u < c->uuids + c->nr_uuids; u++)
453 if (!memcmp(u->uuid, uuid, 16))
454 return u;
455
456 return NULL;
457 }
458
459 static struct uuid_entry *uuid_find_empty(struct cache_set *c)
460 {
461 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
462
463 return uuid_find(c, zero_uuid);
464 }
465
466 /*
467 * Bucket priorities/gens:
468 *
469 * For each bucket, we store on disk its
470 * 8 bit gen
471 * 16 bit priority
472 *
473 * See alloc.c for an explanation of the gen. The priority is used to implement
474 * lru (and in the future other) cache replacement policies; for most purposes
475 * it's just an opaque integer.
476 *
477 * The gens and the priorities don't have a whole lot to do with each other, and
478 * it's actually the gens that must be written out at specific times - it's no
479 * big deal if the priorities don't get written, if we lose them we just reuse
480 * buckets in suboptimal order.
481 *
482 * On disk they're stored in a packed array, in as many buckets as are required
483 * to fit them all. The buckets we use to store them form a list; the journal
484 * header points to the first bucket, the first bucket points to the second
485 * bucket, et cetera.
486 *
487 * This code is used by the allocation code; periodically (whenever it runs out
488 * of buckets to allocate from) the allocation code will invalidate some
489 * buckets, but it can't use those buckets until their new gens are safely on
490 * disk.
491 */
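/*
 * A rough sketch of the layout described above:
 *
 *   journal header --> prio bucket 0 --> prio bucket 1 --> ...
 *                      (each prio bucket holds a csum, magic, next_bucket
 *                       pointer and a packed array of {prio, gen} pairs)
 */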
492
493 static void prio_endio(struct bio *bio)
494 {
495 struct cache *ca = bio->bi_private;
496
497 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
498 bch_bbio_free(bio, ca->set);
499 closure_put(&ca->prio);
500 }
501
502 static void prio_io(struct cache *ca, uint64_t bucket, int op,
503 unsigned long op_flags)
504 {
505 struct closure *cl = &ca->prio;
506 struct bio *bio = bch_bbio_alloc(ca->set);
507
508 closure_init_stack(cl);
509
510 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
511 bio_set_dev(bio, ca->bdev);
512 bio->bi_iter.bi_size = bucket_bytes(ca);
513
514 bio->bi_end_io = prio_endio;
515 bio->bi_private = ca;
516 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
517 bch_bio_map(bio, ca->disk_buckets);
518
519 closure_bio_submit(ca->set, bio, &ca->prio);
520 closure_sync(cl);
521 }
522
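/*
 * Write out the packed prio/gen arrays: allocate a fresh bucket for each
 * chunk, chain the buckets via p->next_bucket, journal the update, and only
 * then free the buckets that held the previous copy.
 */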
523 void bch_prio_write(struct cache *ca)
524 {
525 int i;
526 struct bucket *b;
527 struct closure cl;
528
529 closure_init_stack(&cl);
530
531 lockdep_assert_held(&ca->set->bucket_lock);
532
533 ca->disk_buckets->seq++;
534
535 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
536 &ca->meta_sectors_written);
537
538 //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
539 // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
540
541 for (i = prio_buckets(ca) - 1; i >= 0; --i) {
542 long bucket;
543 struct prio_set *p = ca->disk_buckets;
544 struct bucket_disk *d = p->data;
545 struct bucket_disk *end = d + prios_per_bucket(ca);
546
547 for (b = ca->buckets + i * prios_per_bucket(ca);
548 b < ca->buckets + ca->sb.nbuckets && d < end;
549 b++, d++) {
550 d->prio = cpu_to_le16(b->prio);
551 d->gen = b->gen;
552 }
553
554 p->next_bucket = ca->prio_buckets[i + 1];
555 p->magic = pset_magic(&ca->sb);
556 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
557
558 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
559 BUG_ON(bucket == -1);
560
561 mutex_unlock(&ca->set->bucket_lock);
562 prio_io(ca, bucket, REQ_OP_WRITE, 0);
563 mutex_lock(&ca->set->bucket_lock);
564
565 ca->prio_buckets[i] = bucket;
566 atomic_dec_bug(&ca->buckets[bucket].pin);
567 }
568
569 mutex_unlock(&ca->set->bucket_lock);
570
571 bch_journal_meta(ca->set, &cl);
572 closure_sync(&cl);
573
574 mutex_lock(&ca->set->bucket_lock);
575
576 /*
577 * Don't want the old priorities to get garbage collected until after we've
578 * finished writing the new ones and they've been journalled
579 */
580 for (i = 0; i < prio_buckets(ca); i++) {
581 if (ca->prio_last_buckets[i])
582 __bch_bucket_free(ca,
583 &ca->buckets[ca->prio_last_buckets[i]]);
584
585 ca->prio_last_buckets[i] = ca->prio_buckets[i];
586 }
587 }
588
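/*
 * Read the prios/gens back in at cache set startup, walking the chain of
 * prio buckets written by bch_prio_write(), starting from @bucket (taken
 * from the journal header).
 */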
589 static void prio_read(struct cache *ca, uint64_t bucket)
590 {
591 struct prio_set *p = ca->disk_buckets;
592 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
593 struct bucket *b;
594 unsigned int bucket_nr = 0;
595
596 for (b = ca->buckets;
597 b < ca->buckets + ca->sb.nbuckets;
598 b++, d++) {
599 if (d == end) {
600 ca->prio_buckets[bucket_nr] = bucket;
601 ca->prio_last_buckets[bucket_nr] = bucket;
602 bucket_nr++;
603
604 prio_io(ca, bucket, REQ_OP_READ, 0);
605
606 if (p->csum !=
607 bch_crc64(&p->magic, bucket_bytes(ca) - 8))
608 pr_warn("bad csum reading priorities");
609
610 if (p->magic != pset_magic(&ca->sb))
611 pr_warn("bad magic reading priorities");
612
613 bucket = p->next_bucket;
614 d = p->data;
615 }
616
617 b->prio = le16_to_cpu(d->prio);
618 b->gen = b->last_gc = d->gen;
619 }
620 }
621
622 /* Bcache device */
623
624 static int open_dev(struct block_device *b, fmode_t mode)
625 {
626 struct bcache_device *d = b->bd_disk->private_data;
627
628 if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
629 return -ENXIO;
630
631 closure_get(&d->cl);
632 return 0;
633 }
634
635 static void release_dev(struct gendisk *b, fmode_t mode)
636 {
637 struct bcache_device *d = b->private_data;
638
639 closure_put(&d->cl);
640 }
641
642 static int ioctl_dev(struct block_device *b, fmode_t mode,
643 unsigned int cmd, unsigned long arg)
644 {
645 struct bcache_device *d = b->bd_disk->private_data;
646 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
647
648 if (dc->io_disable)
649 return -EIO;
650
651 return d->ioctl(d, mode, cmd, arg);
652 }
653
654 static const struct block_device_operations bcache_ops = {
655 .open = open_dev,
656 .release = release_dev,
657 .ioctl = ioctl_dev,
658 .owner = THIS_MODULE,
659 };
660
661 void bcache_device_stop(struct bcache_device *d)
662 {
663 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
664 closure_queue(&d->cl);
665 }
666
667 static void bcache_device_unlink(struct bcache_device *d)
668 {
669 lockdep_assert_held(&bch_register_lock);
670
671 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
672 unsigned int i;
673 struct cache *ca;
674
675 sysfs_remove_link(&d->c->kobj, d->name);
676 sysfs_remove_link(&d->kobj, "cache");
677
678 for_each_cache(ca, d->c, i)
679 bd_unlink_disk_holder(ca->bdev, d->disk);
680 }
681 }
682
683 static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
684 const char *name)
685 {
686 unsigned int i;
687 struct cache *ca;
688
689 for_each_cache(ca, d->c, i)
690 bd_link_disk_holder(ca->bdev, d->disk);
691
692 snprintf(d->name, BCACHEDEVNAME_SIZE,
693 "%s%u", name, d->id);
694
695 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
696 sysfs_create_link(&c->kobj, &d->kobj, d->name),
697 "Couldn't create device <-> cache set symlinks");
698
699 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
700 }
701
702 static void bcache_device_detach(struct bcache_device *d)
703 {
704 lockdep_assert_held(&bch_register_lock);
705
706 atomic_dec(&d->c->attached_dev_nr);
707
708 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
709 struct uuid_entry *u = d->c->uuids + d->id;
710
711 SET_UUID_FLASH_ONLY(u, 0);
712 memcpy(u->uuid, invalid_uuid, 16);
713 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
714 bch_uuid_write(d->c);
715 }
716
717 bcache_device_unlink(d);
718
719 d->c->devices[d->id] = NULL;
720 closure_put(&d->c->caching);
721 d->c = NULL;
722 }
723
724 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
725 unsigned int id)
726 {
727 d->id = id;
728 d->c = c;
729 c->devices[id] = d;
730
731 if (id >= c->devices_max_used)
732 c->devices_max_used = id + 1;
733
734 closure_get(&c->caching);
735 }
736
737 static inline int first_minor_to_idx(int first_minor)
738 {
739 return (first_minor/BCACHE_MINORS);
740 }
741
742 static inline int idx_to_first_minor(int idx)
743 {
744 return (idx * BCACHE_MINORS);
745 }
746
747 static void bcache_device_free(struct bcache_device *d)
748 {
749 lockdep_assert_held(&bch_register_lock);
750
751 pr_info("%s stopped", d->disk->disk_name);
752
753 if (d->c)
754 bcache_device_detach(d);
755 if (d->disk && d->disk->flags & GENHD_FL_UP)
756 del_gendisk(d->disk);
757 if (d->disk && d->disk->queue)
758 blk_cleanup_queue(d->disk->queue);
759 if (d->disk) {
760 ida_simple_remove(&bcache_device_idx,
761 first_minor_to_idx(d->disk->first_minor));
762 put_disk(d->disk);
763 }
764
765 bioset_exit(&d->bio_split);
766 kvfree(d->full_dirty_stripes);
767 kvfree(d->stripe_sectors_dirty);
768
769 closure_debug_destroy(&d->cl);
770 }
771
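/*
 * Common initialization for cached devices and flash-only volumes: set up
 * the dirty stripe tracking arrays, reserve a device index and minor range,
 * and allocate the gendisk and request queue with bcache's queue limits.
 */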
772 static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
773 sector_t sectors)
774 {
775 struct request_queue *q;
776 const size_t max_stripes = min_t(size_t, INT_MAX,
777 SIZE_MAX / sizeof(atomic_t));
778 size_t n;
779 int idx;
780
781 if (!d->stripe_size)
782 d->stripe_size = 1 << 31;
783
784 d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
785
786 if (!d->nr_stripes || d->nr_stripes > max_stripes) {
787 pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
788 (unsigned int)d->nr_stripes);
789 return -ENOMEM;
790 }
791
792 n = d->nr_stripes * sizeof(atomic_t);
793 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
794 if (!d->stripe_sectors_dirty)
795 return -ENOMEM;
796
797 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
798 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
799 if (!d->full_dirty_stripes)
800 return -ENOMEM;
801
802 idx = ida_simple_get(&bcache_device_idx, 0,
803 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
804 if (idx < 0)
805 return idx;
806
807 if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
808 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
809 goto err;
810
811 d->disk = alloc_disk(BCACHE_MINORS);
812 if (!d->disk)
813 goto err;
814
815 set_capacity(d->disk, sectors);
816 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
817
818 d->disk->major = bcache_major;
819 d->disk->first_minor = idx_to_first_minor(idx);
820 d->disk->fops = &bcache_ops;
821 d->disk->private_data = d;
822
823 q = blk_alloc_queue(GFP_KERNEL);
824 if (!q)
825 return -ENOMEM;
826
827 blk_queue_make_request(q, NULL);
828 d->disk->queue = q;
829 q->queuedata = d;
830 q->backing_dev_info->congested_data = d;
831 q->limits.max_hw_sectors = UINT_MAX;
832 q->limits.max_sectors = UINT_MAX;
833 q->limits.max_segment_size = UINT_MAX;
834 q->limits.max_segments = BIO_MAX_PAGES;
835 blk_queue_max_discard_sectors(q, UINT_MAX);
836 q->limits.discard_granularity = 512;
837 q->limits.io_min = block_size;
838 q->limits.logical_block_size = block_size;
839 q->limits.physical_block_size = block_size;
840 blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
841 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
842 blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
843
844 blk_queue_write_cache(q, true, true);
845
846 return 0;
847
848 err:
849 ida_simple_remove(&bcache_device_idx, idx);
850 return -ENOMEM;
851
852 }
853
854 /* Cached device */
855
856 static void calc_cached_dev_sectors(struct cache_set *c)
857 {
858 uint64_t sectors = 0;
859 struct cached_dev *dc;
860
861 list_for_each_entry(dc, &c->cached_devs, list)
862 sectors += bdev_sectors(dc->bdev);
863
864 c->cached_dev_sectors = sectors;
865 }
866
867 #define BACKING_DEV_OFFLINE_TIMEOUT 5
868 static int cached_dev_status_update(void *arg)
869 {
870 struct cached_dev *dc = arg;
871 struct request_queue *q;
872
873 /*
874 * If this kthread is being stopped from outside, quit here directly.
875 * dc->io_disable might be set via sysfs interface, so check it
876 * here too.
877 */
878 while (!kthread_should_stop() && !dc->io_disable) {
879 q = bdev_get_queue(dc->bdev);
880 if (blk_queue_dying(q))
881 dc->offline_seconds++;
882 else
883 dc->offline_seconds = 0;
884
885 if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
886 pr_err("%s: device offline for %d seconds",
887 dc->backing_dev_name,
888 BACKING_DEV_OFFLINE_TIMEOUT);
889 pr_err("%s: disable I/O request due to backing "
890 "device offline", dc->disk.name);
891 dc->io_disable = true;
892 /* let others know earlier that io_disable is true */
893 smp_mb();
894 bcache_device_stop(&dc->disk);
895 break;
896 }
897 schedule_timeout_interruptible(HZ);
898 }
899
900 wait_for_kthread_stop();
901 return 0;
902 }
903
904
905 void bch_cached_dev_run(struct cached_dev *dc)
906 {
907 struct bcache_device *d = &dc->disk;
908 char buf[SB_LABEL_SIZE + 1];
909 char *env[] = {
910 "DRIVER=bcache",
911 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
912 NULL,
913 NULL,
914 };
915
916 memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
917 buf[SB_LABEL_SIZE] = '\0';
918 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
919
920 if (atomic_xchg(&dc->running, 1)) {
921 kfree(env[1]);
922 kfree(env[2]);
923 return;
924 }
925
926 if (!d->c &&
927 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
928 struct closure cl;
929
930 closure_init_stack(&cl);
931
932 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
933 bch_write_bdev_super(dc, &cl);
934 closure_sync(&cl);
935 }
936
937 add_disk(d->disk);
938 bd_link_disk_holder(dc->bdev, dc->disk.disk);
939 /*
940 * won't show up in the uevent file, use udevadm monitor -e instead
941 * only class / kset properties are persistent
942 */
943 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
944 kfree(env[1]);
945 kfree(env[2]);
946
947 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
948 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
949 pr_debug("error creating sysfs link");
950
951 dc->status_update_thread = kthread_run(cached_dev_status_update,
952 dc, "bcache_status_update");
953 if (IS_ERR(dc->status_update_thread)) {
954 pr_warn("failed to create bcache_status_update kthread, "
955 "continue to run without monitoring backing "
956 "device status");
957 }
958 }
959
960 /*
961 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
962 * work dc->writeback_rate_update is running. Wait until the routine
963 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
964 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
965 * seconds, give up waiting here and continue to cancel it too.
966 */
967 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
968 {
969 int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
970
971 do {
972 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
973 &dc->disk.flags))
974 break;
975 time_out--;
976 schedule_timeout_interruptible(1);
977 } while (time_out > 0);
978
979 if (time_out == 0)
980 pr_warn("give up waiting for dc->writeback_write_update to quit");
981
982 cancel_delayed_work_sync(&dc->writeback_rate_update);
983 }
984
985 static void cached_dev_detach_finish(struct work_struct *w)
986 {
987 struct cached_dev *dc = container_of(w, struct cached_dev, detach);
988 struct closure cl;
989
990 closure_init_stack(&cl);
991
992 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
993 BUG_ON(refcount_read(&dc->count));
994
995 mutex_lock(&bch_register_lock);
996
997 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
998 cancel_writeback_rate_update_dwork(dc);
999
1000 if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
1001 kthread_stop(dc->writeback_thread);
1002 dc->writeback_thread = NULL;
1003 }
1004
1005 memset(&dc->sb.set_uuid, 0, 16);
1006 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
1007
1008 bch_write_bdev_super(dc, &cl);
1009 closure_sync(&cl);
1010
1011 bcache_device_detach(&dc->disk);
1012 list_move(&dc->list, &uncached_devices);
1013
1014 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
1015 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
1016
1017 mutex_unlock(&bch_register_lock);
1018
1019 pr_info("Caching disabled for %s", dc->backing_dev_name);
1020
1021 /* Drop ref we took in cached_dev_detach() */
1022 closure_put(&dc->disk.cl);
1023 }
1024
1025 void bch_cached_dev_detach(struct cached_dev *dc)
1026 {
1027 lockdep_assert_held(&bch_register_lock);
1028
1029 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1030 return;
1031
1032 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
1033 return;
1034
1035 /*
1036 * Block the device from being closed and freed until we're finished
1037 * detaching
1038 */
1039 closure_get(&dc->disk.cl);
1040
1041 bch_writeback_queue(dc);
1042
1043 cached_dev_put(dc);
1044 }
1045
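/*
 * Attach a backing device to cache set @c: check that the set UUIDs match,
 * find or allocate a uuid_entry for the device, update the uuid entry and
 * backing superblock as needed, then start writeback and run the device.
 */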
1046 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
1047 uint8_t *set_uuid)
1048 {
1049 uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
1050 struct uuid_entry *u;
1051 struct cached_dev *exist_dc, *t;
1052
1053 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
1054 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
1055 return -ENOENT;
1056
1057 if (dc->disk.c) {
1058 pr_err("Can't attach %s: already attached",
1059 dc->backing_dev_name);
1060 return -EINVAL;
1061 }
1062
1063 if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
1064 pr_err("Can't attach %s: shutting down",
1065 dc->backing_dev_name);
1066 return -EINVAL;
1067 }
1068
1069 if (dc->sb.block_size < c->sb.block_size) {
1070 /* Will die */
1071 pr_err("Couldn't attach %s: block size less than set's block size",
1072 dc->backing_dev_name);
1073 return -EINVAL;
1074 }
1075
1076 /* Check whether already attached */
1077 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
1078 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
1079 pr_err("Tried to attach %s but duplicate UUID already attached",
1080 dc->backing_dev_name);
1081
1082 return -EINVAL;
1083 }
1084 }
1085
1086 u = uuid_find(c, dc->sb.uuid);
1087
1088 if (u &&
1089 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
1090 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
1091 memcpy(u->uuid, invalid_uuid, 16);
1092 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
1093 u = NULL;
1094 }
1095
1096 if (!u) {
1097 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1098 pr_err("Couldn't find uuid for %s in set",
1099 dc->backing_dev_name);
1100 return -ENOENT;
1101 }
1102
1103 u = uuid_find_empty(c);
1104 if (!u) {
1105 pr_err("Not caching %s, no room for UUID",
1106 dc->backing_dev_name);
1107 return -EINVAL;
1108 }
1109 }
1110
1111 /*
1112 * Deadlocks since we're called via sysfs...
1113 * sysfs_remove_file(&dc->kobj, &sysfs_attach);
1114 */
1115
1116 if (bch_is_zero(u->uuid, 16)) {
1117 struct closure cl;
1118
1119 closure_init_stack(&cl);
1120
1121 memcpy(u->uuid, dc->sb.uuid, 16);
1122 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
1123 u->first_reg = u->last_reg = rtime;
1124 bch_uuid_write(c);
1125
1126 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
1127 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
1128
1129 bch_write_bdev_super(dc, &cl);
1130 closure_sync(&cl);
1131 } else {
1132 u->last_reg = rtime;
1133 bch_uuid_write(c);
1134 }
1135
1136 bcache_device_attach(&dc->disk, c, u - c->uuids);
1137 list_move(&dc->list, &c->cached_devs);
1138 calc_cached_dev_sectors(c);
1139
1140 /*
1141 * dc->c must be set before dc->count != 0 - paired with the mb in
1142 * cached_dev_get()
1143 */
1144 smp_wmb();
1145 refcount_set(&dc->count, 1);
1146
1147 /* Block writeback thread, but spawn it */
1148 down_write(&dc->writeback_lock);
1149 if (bch_cached_dev_writeback_start(dc)) {
1150 up_write(&dc->writeback_lock);
1151 return -ENOMEM;
1152 }
1153
1154 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1155 bch_sectors_dirty_init(&dc->disk);
1156 atomic_set(&dc->has_dirty, 1);
1157 bch_writeback_queue(dc);
1158 }
1159
1160 bch_cached_dev_run(dc);
1161 bcache_device_link(&dc->disk, c, "bdev");
1162 atomic_inc(&c->attached_dev_nr);
1163
1164 /* Allow the writeback thread to proceed */
1165 up_write(&dc->writeback_lock);
1166
1167 pr_info("Caching %s as %s on set %pU",
1168 dc->backing_dev_name,
1169 dc->disk.disk->disk_name,
1170 dc->disk.c->sb.set_uuid);
1171 return 0;
1172 }
1173
1174 void bch_cached_dev_release(struct kobject *kobj)
1175 {
1176 struct cached_dev *dc = container_of(kobj, struct cached_dev,
1177 disk.kobj);
1178 kfree(dc);
1179 module_put(THIS_MODULE);
1180 }
1181
1182 static void cached_dev_free(struct closure *cl)
1183 {
1184 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1185
1186 mutex_lock(&bch_register_lock);
1187
1188 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
1189 cancel_writeback_rate_update_dwork(dc);
1190
1191 if (!IS_ERR_OR_NULL(dc->writeback_thread))
1192 kthread_stop(dc->writeback_thread);
1193 if (dc->writeback_write_wq)
1194 destroy_workqueue(dc->writeback_write_wq);
1195 if (!IS_ERR_OR_NULL(dc->status_update_thread))
1196 kthread_stop(dc->status_update_thread);
1197
1198 if (atomic_read(&dc->running))
1199 bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1200 bcache_device_free(&dc->disk);
1201 list_del(&dc->list);
1202
1203 mutex_unlock(&bch_register_lock);
1204
1205 if (!IS_ERR_OR_NULL(dc->bdev))
1206 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1207
1208 wake_up(&unregister_wait);
1209
1210 kobject_put(&dc->disk.kobj);
1211 }
1212
1213 static void cached_dev_flush(struct closure *cl)
1214 {
1215 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1216 struct bcache_device *d = &dc->disk;
1217
1218 mutex_lock(&bch_register_lock);
1219 bcache_device_unlink(d);
1220 mutex_unlock(&bch_register_lock);
1221
1222 bch_cache_accounting_destroy(&dc->accounting);
1223 kobject_del(&d->kobj);
1224
1225 continue_at(cl, cached_dev_free, system_wq);
1226 }
1227
1228 static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
1229 {
1230 int ret;
1231 struct io *io;
1232 struct request_queue *q = bdev_get_queue(dc->bdev);
1233
1234 __module_get(THIS_MODULE);
1235 INIT_LIST_HEAD(&dc->list);
1236 closure_init(&dc->disk.cl, NULL);
1237 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1238 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1239 INIT_WORK(&dc->detach, cached_dev_detach_finish);
1240 sema_init(&dc->sb_write_mutex, 1);
1241 INIT_LIST_HEAD(&dc->io_lru);
1242 spin_lock_init(&dc->io_lock);
1243 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1244
1245 dc->sequential_cutoff = 4 << 20;
1246
1247 for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1248 list_add(&io->lru, &dc->io_lru);
1249 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1250 }
1251
1252 dc->disk.stripe_size = q->limits.io_opt >> 9;
1253
1254 if (dc->disk.stripe_size)
1255 dc->partial_stripes_expensive =
1256 q->limits.raid_partial_stripes_expensive;
1257
1258 ret = bcache_device_init(&dc->disk, block_size,
1259 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1260 if (ret)
1261 return ret;
1262
1263 dc->disk.disk->queue->backing_dev_info->ra_pages =
1264 max(dc->disk.disk->queue->backing_dev_info->ra_pages,
1265 q->backing_dev_info->ra_pages);
1266
1267 atomic_set(&dc->io_errors, 0);
1268 dc->io_disable = false;
1269 dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
1270 /* default to auto */
1271 dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;
1272
1273 bch_cached_dev_request_init(dc);
1274 bch_cached_dev_writeback_init(dc);
1275 return 0;
1276 }
1277
1278 /* Cached device - bcache superblock */
1279
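/*
 * Register a backing device from a freshly read superblock: initialize the
 * cached_dev, add its kobjects, and try to attach it to each cache set that
 * is already registered. The device is run immediately only if its
 * superblock state is BDEV_STATE_NONE or BDEV_STATE_STALE; otherwise it
 * waits until it is attached to its cache set.
 */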
1280 static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1281 struct block_device *bdev,
1282 struct cached_dev *dc)
1283 {
1284 const char *err = "cannot allocate memory";
1285 struct cache_set *c;
1286
1287 bdevname(bdev, dc->backing_dev_name);
1288 memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1289 dc->bdev = bdev;
1290 dc->bdev->bd_holder = dc;
1291
1292 bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
1293 bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
1294 get_page(sb_page);
1295
1296
1297 if (cached_dev_init(dc, sb->block_size << 9))
1298 goto err;
1299
1300 err = "error creating kobject";
1301 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
1302 "bcache"))
1303 goto err;
1304 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1305 goto err;
1306
1307 pr_info("registered backing device %s", dc->backing_dev_name);
1308
1309 list_add(&dc->list, &uncached_devices);
1310 /* attach to a matching cache set, if one exists */
1311 list_for_each_entry(c, &bch_cache_sets, list)
1312 bch_cached_dev_attach(dc, c, NULL);
1313
1314 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
1315 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
1316 bch_cached_dev_run(dc);
1317
1318 return;
1319 err:
1320 pr_notice("error %s: %s", dc->backing_dev_name, err);
1321 bcache_device_stop(&dc->disk);
1322 }
1323
1324 /* Flash only volumes */
1325
1326 void bch_flash_dev_release(struct kobject *kobj)
1327 {
1328 struct bcache_device *d = container_of(kobj, struct bcache_device,
1329 kobj);
1330 kfree(d);
1331 }
1332
1333 static void flash_dev_free(struct closure *cl)
1334 {
1335 struct bcache_device *d = container_of(cl, struct bcache_device, cl);
1336
1337 mutex_lock(&bch_register_lock);
1338 atomic_long_sub(bcache_dev_sectors_dirty(d),
1339 &d->c->flash_dev_dirty_sectors);
1340 bcache_device_free(d);
1341 mutex_unlock(&bch_register_lock);
1342 kobject_put(&d->kobj);
1343 }
1344
1345 static void flash_dev_flush(struct closure *cl)
1346 {
1347 struct bcache_device *d = container_of(cl, struct bcache_device, cl);
1348
1349 mutex_lock(&bch_register_lock);
1350 bcache_device_unlink(d);
1351 mutex_unlock(&bch_register_lock);
1352 kobject_del(&d->kobj);
1353 continue_at(cl, flash_dev_free, system_wq);
1354 }
1355
1356 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
1357 {
1358 struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
1359 GFP_KERNEL);
1360 if (!d)
1361 return -ENOMEM;
1362
1363 closure_init(&d->cl, NULL);
1364 set_closure_fn(&d->cl, flash_dev_flush, system_wq);
1365
1366 kobject_init(&d->kobj, &bch_flash_dev_ktype);
1367
1368 if (bcache_device_init(d, block_bytes(c), u->sectors))
1369 goto err;
1370
1371 bcache_device_attach(d, c, u - c->uuids);
1372 bch_sectors_dirty_init(d);
1373 bch_flash_dev_request_init(d);
1374 add_disk(d->disk);
1375
1376 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
1377 goto err;
1378
1379 bcache_device_link(d, c, "volume");
1380
1381 return 0;
1382 err:
1383 kobject_put(&d->kobj);
1384 return -ENOMEM;
1385 }
1386
1387 static int flash_devs_run(struct cache_set *c)
1388 {
1389 int ret = 0;
1390 struct uuid_entry *u;
1391
1392 for (u = c->uuids;
1393 u < c->uuids + c->nr_uuids && !ret;
1394 u++)
1395 if (UUID_FLASH_ONLY(u))
1396 ret = flash_dev_run(c, u);
1397
1398 return ret;
1399 }
1400
1401 int bch_flash_dev_create(struct cache_set *c, uint64_t size)
1402 {
1403 struct uuid_entry *u;
1404
1405 if (test_bit(CACHE_SET_STOPPING, &c->flags))
1406 return -EINTR;
1407
1408 if (!test_bit(CACHE_SET_RUNNING, &c->flags))
1409 return -EPERM;
1410
1411 u = uuid_find_empty(c);
1412 if (!u) {
1413 pr_err("Can't create volume, no room for UUID");
1414 return -EINVAL;
1415 }
1416
1417 get_random_bytes(u->uuid, 16);
1418 memset(u->label, 0, 32);
1419 u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());
1420
1421 SET_UUID_FLASH_ONLY(u, 1);
1422 u->sectors = size >> 9;
1423
1424 bch_uuid_write(c);
1425
1426 return flash_dev_run(c, u);
1427 }
1428
1429 bool bch_cached_dev_error(struct cached_dev *dc)
1430 {
1431 struct cache_set *c;
1432
1433 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1434 return false;
1435
1436 dc->io_disable = true;
1437 /* make others know io_disable is true earlier */
1438 smp_mb();
1439
1440 pr_err("stop %s: too many IO errors on backing device %s\n",
1441 dc->disk.disk->disk_name, dc->backing_dev_name);
1442
1443 /*
1444 * If the cached device is still attached to a cache set,
1445 * even though dc->io_disable is true and no more I/O requests
1446 * are accepted, internal I/O to the cache device (writeback
1447 * scan or garbage collection) may still prevent the bcache
1448 * device from being stopped. So CACHE_SET_IO_DISABLE should be
1449 * set in c->flags too, so that internal I/O to the cache
1450 * device is rejected and stopped immediately.
1451 * If c is NULL, the bcache device is not attached to any
1452 * cache set, so there is no CACHE_SET_IO_DISABLE bit to set.
1453 */
1454 c = dc->disk.c;
1455 if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
1456 pr_info("CACHE_SET_IO_DISABLE already set");
1457
1458 bcache_device_stop(&dc->disk);
1459 return true;
1460 }
1461
1462 /* Cache set */
1463
1464 __printf(2, 3)
1465 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
1466 {
1467 va_list args;
1468
1469 if (c->on_error != ON_ERROR_PANIC &&
1470 test_bit(CACHE_SET_STOPPING, &c->flags))
1471 return false;
1472
1473 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
1474 pr_info("CACHE_SET_IO_DISABLE already set");
1475
1476 /*
1477 * XXX: we can be called from atomic context
1478 * acquire_console_sem();
1479 */
1480
1481 pr_err("bcache: error on %pU: ", c->sb.set_uuid);
1482
1483 va_start(args, fmt);
1484 vprintk(fmt, args);
1485 va_end(args);
1486
1487 pr_err(", disabling caching\n");
1488
1489 if (c->on_error == ON_ERROR_PANIC)
1490 panic("panic forced after error\n");
1491
1492 bch_cache_set_unregister(c);
1493 return true;
1494 }
1495
1496 void bch_cache_set_release(struct kobject *kobj)
1497 {
1498 struct cache_set *c = container_of(kobj, struct cache_set, kobj);
1499
1500 kfree(c);
1501 module_put(THIS_MODULE);
1502 }
1503
1504 static void cache_set_free(struct closure *cl)
1505 {
1506 struct cache_set *c = container_of(cl, struct cache_set, cl);
1507 struct cache *ca;
1508 unsigned int i;
1509
1510 if (!IS_ERR_OR_NULL(c->debug))
1511 debugfs_remove(c->debug);
1512
1513 bch_open_buckets_free(c);
1514 bch_btree_cache_free(c);
1515 bch_journal_free(c);
1516
1517 for_each_cache(ca, c, i)
1518 if (ca) {
1519 ca->set = NULL;
1520 c->cache[ca->sb.nr_this_dev] = NULL;
1521 kobject_put(&ca->kobj);
1522 }
1523
1524 bch_bset_sort_state_free(&c->sort);
1525 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
1526
1527 if (c->moving_gc_wq)
1528 destroy_workqueue(c->moving_gc_wq);
1529 bioset_exit(&c->bio_split);
1530 mempool_exit(&c->fill_iter);
1531 mempool_exit(&c->bio_meta);
1532 mempool_exit(&c->search);
1533 kfree(c->devices);
1534
1535 mutex_lock(&bch_register_lock);
1536 list_del(&c->list);
1537 mutex_unlock(&bch_register_lock);
1538
1539 pr_info("Cache set %pU unregistered", c->sb.set_uuid);
1540 wake_up(&unregister_wait);
1541
1542 closure_debug_destroy(&c->cl);
1543 kobject_put(&c->kobj);
1544 }
1545
1546 static void cache_set_flush(struct closure *cl)
1547 {
1548 struct cache_set *c = container_of(cl, struct cache_set, caching);
1549 struct cache *ca;
1550 struct btree *b;
1551 unsigned int i;
1552
1553 bch_cache_accounting_destroy(&c->accounting);
1554
1555 kobject_put(&c->internal);
1556 kobject_del(&c->kobj);
1557
1558 if (c->gc_thread)
1559 kthread_stop(c->gc_thread);
1560
1561 if (!IS_ERR_OR_NULL(c->root))
1562 list_add(&c->root->list, &c->btree_cache);
1563
1564 /* Should skip this if we're unregistering because of an error */
1565 list_for_each_entry(b, &c->btree_cache, list) {
1566 mutex_lock(&b->write_lock);
1567 if (btree_node_dirty(b))
1568 __bch_btree_node_write(b, NULL);
1569 mutex_unlock(&b->write_lock);
1570 }
1571
1572 for_each_cache(ca, c, i)
1573 if (ca->alloc_thread)
1574 kthread_stop(ca->alloc_thread);
1575
1576 if (c->journal.cur) {
1577 cancel_delayed_work_sync(&c->journal.work);
1578 /* flush last journal entry if needed */
1579 c->journal.work.work.func(&c->journal.work.work);
1580 }
1581
1582 closure_return(cl);
1583 }
1584
1585 /*
1586 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
1587 * cache set is unregistering due to too many I/O errors. In this condition,
1588 * the bcache device might be stopped, it depends on stop_when_cache_set_failed
1589 * value and whether the broken cache has dirty data:
1590 *
1591 * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device
1592 * BCH_CACHED_DEV_STOP_AUTO 0 NO
1593 * BCH_CACHED_DEV_STOP_AUTO 1 YES
1594 * BCH_CACHED_DEV_STOP_ALWAYS 0 YES
1595 * BCH_CACHED_DEV_STOP_ALWAYS 1 YES
1596 *
1597 * The expected behavior is, if stop_when_cache_set_failed is configured to
1598 * "auto" via sysfs interface, the bcache device will not be stopped if the
1599 * backing device is clean on the broken cache device.
1600 */
1601 static void conditional_stop_bcache_device(struct cache_set *c,
1602 struct bcache_device *d,
1603 struct cached_dev *dc)
1604 {
1605 if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
1606 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.",
1607 d->disk->disk_name, c->sb.set_uuid);
1608 bcache_device_stop(d);
1609 } else if (atomic_read(&dc->has_dirty)) {
1610 /*
1611 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
1612 * and dc->has_dirty == 1
1613 */
1614 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
1615 d->disk->disk_name);
1616 /*
1617 * There might be a small window in which the cache set is
1618 * released but the bcache device is not. Inside this window,
1619 * regular I/O requests will go directly to the backing
1620 * device, as no cache set is attached. In writeback mode
1621 * with a dirty cache this can also leave the data
1622 * inconsistent.
1623 * Therefore before calling bcache_device_stop() due
1624 * to a broken cache device, dc->io_disable should be
1625 * explicitly set to true.
1626 */
1627 dc->io_disable = true;
1628 /* make others know io_disable is true earlier */
1629 smp_mb();
1630 bcache_device_stop(d);
1631 } else {
1632 /*
1633 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
1634 * and dc->has_dirty == 0
1635 */
1636 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.",
1637 d->disk->disk_name);
1638 }
1639 }
1640
1641 static void __cache_set_unregister(struct closure *cl)
1642 {
1643 struct cache_set *c = container_of(cl, struct cache_set, caching);
1644 struct cached_dev *dc;
1645 struct bcache_device *d;
1646 size_t i;
1647
1648 mutex_lock(&bch_register_lock);
1649
1650 for (i = 0; i < c->devices_max_used; i++) {
1651 d = c->devices[i];
1652 if (!d)
1653 continue;
1654
1655 if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
1656 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
1657 dc = container_of(d, struct cached_dev, disk);
1658 bch_cached_dev_detach(dc);
1659 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1660 conditional_stop_bcache_device(c, d, dc);
1661 } else {
1662 bcache_device_stop(d);
1663 }
1664 }
1665
1666 mutex_unlock(&bch_register_lock);
1667
1668 continue_at(cl, cache_set_flush, system_wq);
1669 }
1670
1671 void bch_cache_set_stop(struct cache_set *c)
1672 {
1673 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
1674 closure_queue(&c->caching);
1675 }
1676
1677 void bch_cache_set_unregister(struct cache_set *c)
1678 {
1679 set_bit(CACHE_SET_UNREGISTERING, &c->flags);
1680 bch_cache_set_stop(c);
1681 }
1682
1683 #define alloc_bucket_pages(gfp, c) \
1684 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
1685
1686 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1687 {
1688 int iter_size;
1689 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
1690
1691 if (!c)
1692 return NULL;
1693
1694 __module_get(THIS_MODULE);
1695 closure_init(&c->cl, NULL);
1696 set_closure_fn(&c->cl, cache_set_free, system_wq);
1697
1698 closure_init(&c->caching, &c->cl);
1699 set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
1700
1701 /* Maybe create continue_at_noreturn() and use it here? */
1702 closure_set_stopped(&c->cl);
1703 closure_put(&c->cl);
1704
1705 kobject_init(&c->kobj, &bch_cache_set_ktype);
1706 kobject_init(&c->internal, &bch_cache_set_internal_ktype);
1707
1708 bch_cache_accounting_init(&c->accounting, &c->cl);
1709
1710 memcpy(c->sb.set_uuid, sb->set_uuid, 16);
1711 c->sb.block_size = sb->block_size;
1712 c->sb.bucket_size = sb->bucket_size;
1713 c->sb.nr_in_set = sb->nr_in_set;
1714 c->sb.last_mount = sb->last_mount;
1715 c->bucket_bits = ilog2(sb->bucket_size);
1716 c->block_bits = ilog2(sb->block_size);
1717 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
1718 c->devices_max_used = 0;
1719 atomic_set(&c->attached_dev_nr, 0);
1720 c->btree_pages = bucket_pages(c);
1721 if (c->btree_pages > BTREE_MAX_PAGES)
1722 c->btree_pages = max_t(int, c->btree_pages / 4,
1723 BTREE_MAX_PAGES);
1724
1725 sema_init(&c->sb_write_mutex, 1);
1726 mutex_init(&c->bucket_lock);
1727 init_waitqueue_head(&c->btree_cache_wait);
1728 init_waitqueue_head(&c->bucket_wait);
1729 init_waitqueue_head(&c->gc_wait);
1730 sema_init(&c->uuid_write_mutex, 1);
1731
1732 spin_lock_init(&c->btree_gc_time.lock);
1733 spin_lock_init(&c->btree_split_time.lock);
1734 spin_lock_init(&c->btree_read_time.lock);
1735
1736 bch_moving_init_cache_set(c);
1737
1738 INIT_LIST_HEAD(&c->list);
1739 INIT_LIST_HEAD(&c->cached_devs);
1740 INIT_LIST_HEAD(&c->btree_cache);
1741 INIT_LIST_HEAD(&c->btree_cache_freeable);
1742 INIT_LIST_HEAD(&c->btree_cache_freed);
1743 INIT_LIST_HEAD(&c->data_buckets);
1744
1745 iter_size = (sb->bucket_size / sb->block_size + 1) *
1746 sizeof(struct btree_iter_set);
1747
1748 if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
1749 mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
1750 mempool_init_kmalloc_pool(&c->bio_meta, 2,
1751 sizeof(struct bbio) + sizeof(struct bio_vec) *
1752 bucket_pages(c)) ||
1753 mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
1754 bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
1755 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
1756 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
1757 !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
1758 WQ_MEM_RECLAIM, 0)) ||
1759 bch_journal_alloc(c) ||
1760 bch_btree_cache_alloc(c) ||
1761 bch_open_buckets_alloc(c) ||
1762 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
1763 goto err;
1764
1765 c->congested_read_threshold_us = 2000;
1766 c->congested_write_threshold_us = 20000;
1767 c->error_limit = DEFAULT_IO_ERROR_LIMIT;
1768 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
1769
1770 return c;
1771 err:
1772 bch_cache_set_unregister(c);
1773 return NULL;
1774 }
1775
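/*
 * Bring a cache set up once all member caches have registered: for a synced
 * set, read the journal, priorities, uuids and btree root and replay the
 * journal; for a brand new set, invalidate the existing data and write out a
 * fresh uuid bucket and btree root before the first journal entry.
 */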
1776 static void run_cache_set(struct cache_set *c)
1777 {
1778 const char *err = "cannot allocate memory";
1779 struct cached_dev *dc, *t;
1780 struct cache *ca;
1781 struct closure cl;
1782 unsigned int i;
1783
1784 closure_init_stack(&cl);
1785
1786 for_each_cache(ca, c, i)
1787 c->nbuckets += ca->sb.nbuckets;
1788 set_gc_sectors(c);
1789
1790 if (CACHE_SYNC(&c->sb)) {
1791 LIST_HEAD(journal);
1792 struct bkey *k;
1793 struct jset *j;
1794
1795 err = "cannot allocate memory for journal";
1796 if (bch_journal_read(c, &journal))
1797 goto err;
1798
1799 pr_debug("btree_journal_read() done");
1800
1801 err = "no journal entries found";
1802 if (list_empty(&journal))
1803 goto err;
1804
1805 j = &list_entry(journal.prev, struct journal_replay, list)->j;
1806
1807 err = "IO error reading priorities";
1808 for_each_cache(ca, c, i)
1809 prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
1810
1811 /*
1812 * If prio_read() fails it'll call cache_set_error and we'll
1813 * tear everything down right away, but if we perhaps checked
1814 * sooner we could avoid journal replay.
1815 */
1816
1817 k = &j->btree_root;
1818
1819 err = "bad btree root";
1820 if (__bch_btree_ptr_invalid(c, k))
1821 goto err;
1822
1823 err = "error reading btree root";
1824 c->root = bch_btree_node_get(c, NULL, k,
1825 j->btree_level,
1826 true, NULL);
1827 if (IS_ERR_OR_NULL(c->root))
1828 goto err;
1829
1830 list_del_init(&c->root->list);
1831 rw_unlock(true, c->root);
1832
1833 err = uuid_read(c, j, &cl);
1834 if (err)
1835 goto err;
1836
1837 err = "error in recovery";
1838 if (bch_btree_check(c))
1839 goto err;
1840
1841 bch_journal_mark(c, &journal);
1842 bch_initial_gc_finish(c);
1843 pr_debug("btree_check() done");
1844
1845 /*
1846 * bcache_journal_next() can't happen sooner, or
1847 * btree_gc_finish() will give spurious errors about last_gc >
1848 * gc_gen - this is a hack but oh well.
1849 */
1850 bch_journal_next(&c->journal);
1851
1852 err = "error starting allocator thread";
1853 for_each_cache(ca, c, i)
1854 if (bch_cache_allocator_start(ca))
1855 goto err;
1856
1857 /*
1858 * First place it's safe to allocate: btree_check() and
1859 * btree_gc_finish() have to run before we have buckets to
1860 * allocate, and bch_bucket_alloc_set() might cause a journal
1861 * entry to be written so bcache_journal_next() has to be called
1862 * first.
1863 *
1864 * If the uuids were in the old format we have to rewrite them
1865 * before the next journal entry is written:
1866 */
1867 if (j->version < BCACHE_JSET_VERSION_UUID)
1868 __uuid_write(c);
1869
1870 bch_journal_replay(c, &journal);
1871 } else {
1872 pr_notice("invalidating existing data");
1873
1874 for_each_cache(ca, c, i) {
1875 unsigned int j;
1876
1877 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
1878 2, SB_JOURNAL_BUCKETS);
1879
1880 for (j = 0; j < ca->sb.keys; j++)
1881 ca->sb.d[j] = ca->sb.first_bucket + j;
1882 }
1883
1884 bch_initial_gc_finish(c);
1885
1886 err = "error starting allocator thread";
1887 for_each_cache(ca, c, i)
1888 if (bch_cache_allocator_start(ca))
1889 goto err;
1890
1891 mutex_lock(&c->bucket_lock);
1892 for_each_cache(ca, c, i)
1893 bch_prio_write(ca);
1894 mutex_unlock(&c->bucket_lock);
1895
1896 err = "cannot allocate new UUID bucket";
1897 if (__uuid_write(c))
1898 goto err;
1899
1900 err = "cannot allocate new btree root";
1901 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
1902 if (IS_ERR_OR_NULL(c->root))
1903 goto err;
1904
1905 mutex_lock(&c->root->write_lock);
1906 bkey_copy_key(&c->root->key, &MAX_KEY);
1907 bch_btree_node_write(c->root, &cl);
1908 mutex_unlock(&c->root->write_lock);
1909
1910 bch_btree_set_root(c->root);
1911 rw_unlock(true, c->root);
1912
1913 /*
1914 * We don't want to write the first journal entry until
1915 * everything is set up - fortunately journal entries won't be
1916 * written until the SET_CACHE_SYNC() here:
1917 */
1918 SET_CACHE_SYNC(&c->sb, true);
1919
1920 bch_journal_next(&c->journal);
1921 bch_journal_meta(c, &cl);
1922 }
1923
1924 err = "error starting gc thread";
1925 if (bch_gc_thread_start(c))
1926 goto err;
1927
1928 closure_sync(&cl);
1929 c->sb.last_mount = (u32)ktime_get_real_seconds();
1930 bcache_write_super(c);
1931
1932 list_for_each_entry_safe(dc, t, &uncached_devices, list)
1933 bch_cached_dev_attach(dc, c, NULL);
1934
1935 flash_devs_run(c);
1936
1937 set_bit(CACHE_SET_RUNNING, &c->flags);
1938 return;
1939 err:
1940 closure_sync(&cl);
1941 /* XXX: test this, it's broken */
1942 bch_cache_set_error(c, "%s", err);
1943 }
1944
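/*
 * A cache device may only join a set whose superblock agrees on the basic
 * geometry: block size, bucket size and number of devices in the set.
 */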
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
	       ca->sb.bucket_size == c->sb.bucket_size &&
	       ca->sb.nr_in_set == c->sb.nr_in_set;
}

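/*
 * Add a cache device to the cache set identified by its set_uuid, allocating
 * a new (empty) cache_set if this is the first member seen.  Once every
 * device in the set has been loaded, the whole set is started via
 * run_cache_set().  Returns NULL on success or an error string.
 */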
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

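/*
 * kobject release callback for a cache device: detach it from its set, free
 * the fifos, heap and bucket arrays set up by cache_alloc(), and drop the
 * block device and module references.
 */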
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned int i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(bio_first_page_all(&ca->sb_bio));

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

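/*
 * Allocate the in-memory state for a cache device: the per-reserve
 * free-bucket fifos, the free_inc fifo and heap used by the allocator, the
 * in-memory bucket array, and the buffers used to read and write bucket
 * priorities.
 */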
static int cache_alloc(struct cache *ca)
{
	size_t free;
	size_t btree_buckets;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	/*
	 * When ca->sb.njournal_buckets is not zero, a journal exists and
	 * btree nodes may split during bch_journal_replay(), so buckets of
	 * RESERVE_BTREE type are needed.  In the worst case every journal
	 * bucket holds valid journal and all of its keys need to be
	 * replayed, so reserve as many RESERVE_BTREE buckets as there are
	 * journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(array_size(sizeof(struct bucket),
					       ca->sb.nbuckets))) ||
	    !(ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
						     prio_buckets(ca), 2),
					 GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

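/*
 * Set up a cache device from its superblock and try to add it to a cache
 * set via register_cache_set().  Returns 0 on success or a negative errno,
 * logging the textual error on the failure paths.
 */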
static int register_cache(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj,
			&part_to_dev(bdev->bd_part)->kobj,
			"bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s", ca->cache_dev_name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size);

kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);

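/*
 * Helpers for register_bcache(): check whether a block device is already in
 * use by bcache, either as a backing device or as a cache device, so that a
 * repeated registration can be reported as such.
 */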
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned int i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

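/*
 * Handler for writes to /sys/fs/bcache/register (and register_quiet): open
 * the named block device, read its superblock, and register it as either a
 * backing device or a cache device depending on SB_IS_BDEV().
 */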
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
		goto err;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	err = "failed to register device";
	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		if (!ca)
			goto err_close;

		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

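/*
 * Reboot notifier: on shutdown, halt or power-off, stop every cache set and
 * backing device and wait up to about two seconds for them to finish
 * closing before the system goes down.
 */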
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

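/*
 * Module teardown: undo everything bcache_init() set up.  Also used as the
 * error path of bcache_init(), so each step checks whether the corresponding
 * resource was actually created.
 */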
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

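/*
 * Module init: register the reboot notifier and block major, create the
 * workqueues and the /sys/fs/bcache kobject, and hook up the register and
 * register_quiet sysfs files.
 */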
static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;

	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
	if (!bcache_kobj)
		goto err;

	if (bch_request_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	bch_debug_init(bcache_kobj);
	closure_debug_init();

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);