// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list
 * of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it were, we'd often have to sleep
 * while priorities and gens were being written before we could allocate.
 * c->free is a smaller freelist, and buckets on that list are always ready to
 * be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
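 *
 * (In the code below, bucket_gc_gen(b) measures how far a bucket's gen has
 * advanced since the last garbage collection pass, and can_inc_bucket_gen()
 * refuses to invalidate a bucket once that distance reaches
 * BUCKET_GC_GEN_MAX.)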
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

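/*
 * Decay bucket priorities as IO happens: every time roughly 1/1024th of the
 * cache's capacity in sectors has been transferred, decrement the prio of
 * every unpinned non-btree bucket. Recently written buckets thus keep
 * relatively higher prios, which is what gives bucket_prio() below its
 * approximately-LRU ordering.
 */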
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned int i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
	       !atomic_read(&b->pin) &&
	       can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale bucket
 * priorities appropriately.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})
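
/*
 * Worked example (INITIAL_PRIO is 32768 in bcache.h): if the lowest prio in
 * the cache set is 24576, the min_prio term above is (32768 - 24576) / 8 =
 * 1024. A bucket at prio 24576 with 100 live sectors then scores
 * (0 + 1024) * 100, while one at prio 32768 scores (8192 + 1024) * 100 -
 * nine times more - so the long-idle bucket is reclaimed first.
 */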

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

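/*
 * One pass over all buckets keeps ca->heap filled with the lowest
 * bucket_prio() candidates (with bucket_max_cmp, the heap root is the worst
 * candidate currently kept, so it's the one replaced when a better bucket
 * shows up). The heap is then re-sifted into min-heap order so buckets can
 * be popped and invalidated cheapest-first.
 */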
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

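/*
 * Sleep until @cond becomes true, re-evaluating it with bucket_lock held.
 * The lock is dropped across schedule() so that other threads can make
 * progress; if the thread is told to stop (or the cache set's IO is
 * disabled), we bail out to the allocator thread's cleanup path instead.
 */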
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static int bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

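			/*
			 * blkdev_issue_discard() waits for the discard to
			 * complete, so drop bucket_lock while it sleeps:
			 */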
			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

/* Allocation */

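/*
 * Allocate a single bucket from @ca, trying the RESERVE_NONE freelist first
 * and then the freelist for @reserve. Returns the bucket's index, or -1 if
 * @wait is false and no bucket is immediately available. Must be called with
 * bucket_lock held; if @wait is true the lock is dropped while we sleep.
 */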
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
				     bucket_to_sector(c, b),
				     ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}
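
/*
 * A sketch of typical usage, roughly what the btree node allocation path
 * does (surrounding code and error handling elided):
 *
 *	BKEY_PADDED(key) k;
 *
 *	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
 *		return ERR_PTR(-ENOSPC);
 *
 *	// k.key now carries KEY_PTRS(&k.key) == 1 pointer to a fresh bucket
 */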

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache
 * at the same time, you'll get better cache utilization if you try to
 * segregate their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable: if
 * they are mixed into a bucket with dirty sectors from a cached device, that
 * bucket stays marked dirty and won't be reclaimed, even after the cached
 * device's dirty data has been written back to the backing device.
 *
 * And say you've started Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
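/*
 * Restating the above as the match order implemented below: skip open
 * buckets whose flash-only-ness differs from the write's target device;
 * prefer an exact key match (the write is sequential with the bucket's last
 * write); else remember a bucket with the same write_point; else fall back
 * to the least recently used open bucket.
 */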
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true (as it is on the writeback path), this will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

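	/*
	 * If less than a full block is left, it's too small to ever write
	 * to, so close out this open bucket:
	 */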
	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}