// SPDX-License-Identifier: GPL-2.0
 * background writeback - scan btree for dirty data and write it to the backing
if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
    c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
c->gc_after_writeback |= BCH_DO_AUTO_GC;
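/*
 * How this flag is consumed, roughly: once the writeback thread later
 * finds no dirty data left, it checks c->gc_after_writeback and, if
 * BCH_DO_AUTO_GC is set, clears that bit and wakes the garbage
 * collector so the newly cleaned buckets can be reclaimed; see the
 * bch_writeback_thread() excerpt further down.
 */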
static uint64_t __calc_target_rate(struct cached_dev *dc)
struct cache_set *c = dc->disk.c;
 * flash-only devices
uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
atomic_long_read(&c->flash_dev_dirty_sectors);
div64_u64(bdev_nr_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
c->cached_dev_sectors);
div_u64(cache_sectors * dc->writeback_percent, 100);
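/*
 * A rough worked example, assuming the defaults set in
 * bch_cached_dev_writeback_init() below: with writeback_percent = 10,
 * a cache providing 100 GiB of usable sectors, and a backing device
 * that accounts for half of c->cached_dev_sectors, the per-device
 * share computed above is 0.5 in WRITEBACK_SHARE_SHIFT fixed point,
 * and scaling the 10% dirty target by that share gives this device a
 * target of roughly 100 GiB * 10% * 0.5 = 5 GiB of dirty data. The
 * shift keeps the share computation in integer arithmetic.
 */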
static void __update_writeback_rate(struct cached_dev *dc)
 * based on configured values. These are stored as inverses to
 * avoid fixed point math and to make configuration easy-- e.g.
 * attempts to write at a rate that would retire all the dirty
 * This acts as a slow, long-term average that is not subject to
int64_t target = __calc_target_rate(dc);
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t error = dirty - target;
div_s64(error, dc->writeback_rate_p_term_inverse);
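/*
 * For example, with the default writeback_rate_p_term_inverse of 40,
 * being 40960 sectors (20 MiB) over target contributes a proportional
 * term of 40960 / 40 = 1024 (sectors per second, assuming the rate is
 * expressed the way bch_next_delay() treats it); being the same amount
 * under target contributes a negative term of the same magnitude.
 */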
 * We need to consider the number of dirty buckets as well
 * dirty data may still not have reached the writeback percent, so the rate
 * stays stuck in a non-writeback mode.
struct cache_set *c = dc->disk.c;
int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets;
if (dc->writeback_consider_fragment &&
c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) {
div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty);
if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
fp_term = (int64_t)dc->writeback_rate_fp_term_low *
(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
} else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
fp_term = (int64_t)dc->writeback_rate_fp_term_high *
(c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
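/*
 * A rough worked example of the fragmentation boost, assuming the
 * usual thresholds of about 50/57/71% for LOW/MID/HIGH and the default
 * writeback_rate_fp_term_mid = 10 set below: at 62% of buckets in use,
 * fp_term = 10 * (62 - 57) = 50. The omitted lines use fp_term to
 * boost the proportional term when fragmentation is high, so a heavily
 * fragmented but only mildly dirty cache is still written back at a
 * useful rate instead of idling below the dirty target.
 */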
if ((error < 0 && dc->writeback_rate_integral > 0) ||
dc->writeback_rate.next + NSEC_PER_MSEC))) {
 * It's necessary to scale this by
 * writeback_rate_update_seconds to keep the integral
dc->writeback_rate_integral += error *
dc->writeback_rate_update_seconds;
integral_scaled = div_s64(dc->writeback_rate_integral,
dc->writeback_rate_i_term_inverse);
dc->writeback_rate_minimum, NSEC_PER_SEC);
dc->writeback_rate_proportional = proportional_scaled;
dc->writeback_rate_integral_scaled = integral_scaled;
dc->writeback_rate_change = new_rate -
atomic_long_read(&dc->writeback_rate.rate);
atomic_long_set(&dc->writeback_rate.rate, new_rate);
dc->writeback_rate_target = target;
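/*
 * The fields stored just above (proportional, integral_scaled, change,
 * target) are not fed back into the controller; they are kept so the
 * current controller state can be inspected through sysfs. The new
 * rate itself is clamped between writeback_rate_minimum and
 * NSEC_PER_SEC (the bounds visible in the clamp arguments above)
 * before being published with atomic_long_set().
 */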
 * If c->idle_counter overflows (idle for a really long time),
counter = atomic_inc_return(&c->idle_counter);
atomic_set(&c->idle_counter, 0);
dev_nr = atomic_read(&c->attached_dev_nr);
 * c->idle_counter is increased by the writeback threads of all
 * attached backing devices, in order to represent a rough
 * The following calculation is equivalent to checking
 * called. If all backing devices attached to the same cache set have
 * identical dc->writeback_rate_update_seconds values, it is about 6
 * c->at_max_writeback_rate is set to 1, and then the maximum writeback
 * rate is set on each dc->writeback_rate.rate.
 * In order to avoid the extra locking cost of counting the exact number
 * of dirty cached devices, c->attached_dev_nr is used to calculate the idle
 * threshold. It might be bigger if not all cached devices are in write-
struct cached_dev *dc)
if (!c->idle_max_writeback_rate_enabled)
if (!c->gc_mark_valid)
if (atomic_read(&c->at_max_writeback_rate) != 1)
atomic_set(&c->at_max_writeback_rate, 1);
atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
dc->writeback_rate_proportional = 0;
dc->writeback_rate_integral_scaled = 0;
dc->writeback_rate_change = 0;
!atomic_read(&c->at_max_writeback_rate))
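/*
 * Putting the two helpers above together: every rate-update pass on an
 * idle cache set bumps c->idle_counter, and once roughly six rounds
 * per attached device have gone by with no regular I/O, every cached
 * device's writeback rate is pinned at INT_MAX so dirty data drains as
 * fast as possible while the set is idle. Normal I/O elsewhere in the
 * request path resets the counter, and the PI controller takes over
 * again on the next update.
 */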
struct cached_dev *dc = container_of(to_delayed_work(work),
struct cache_set *c = dc->disk.c;
set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
 * will set the writeback rate to a maximum number. Then it is
 * unnecessary to update the writeback rate for an idle cache set
if (atomic_read(&dc->has_dirty) && dc->writeback_percent &&
!set_at_max_writeback_rate(c, dc)) {
if (!down_read_trylock((&dc->writeback_lock))) {
dc->rate_update_retry++;
if (dc->rate_update_retry <=
down_read(&dc->writeback_lock);
dc->rate_update_retry = 0;
__update_writeback_rate(dc);
up_read(&dc->writeback_lock);
if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
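/*
 * Note on the locking above: this runs from a delayed work item, so it
 * avoids sleeping on dc->writeback_lock while the writeback thread may
 * hold it for a long refill. If the trylock fails the update is simply
 * skipped and rate_update_retry is bumped; only after enough
 * consecutive skips does it fall back to a blocking down_read(), which
 * keeps the rate reasonably fresh even under heavy lock contention.
 * Either way the work re-arms itself every writeback_rate_update_seconds
 * for as long as writeback is running and the cache set is healthy.
 */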
static unsigned int writeback_delay(struct cached_dev *dc,
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
return bch_next_delay(&dc->writeback_rate, sectors);
struct cached_dev *dc;

struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
bio_init(bio, NULL, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_private = w;
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
bio_free_pages(&io->bio);
if (KEY_DIRTY(&w->key)) {
bkey_copy(keys.top, &w->key);
for (i = 0; i < KEY_PTRS(&w->key); i++)
atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
trace_bcache_writeback_collision(&w->key);
? &dc->disk.c->writeback_keys_failed
: &dc->disk.c->writeback_keys_done);
bch_keybuf_del(&dc->writeback_keys, w);
up(&dc->in_flight);
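/*
 * What the excerpt above amounts to: once the backing-device write has
 * finished, the key is re-inserted into the btree with its dirty bit
 * cleared, and the cache buckets it points at are pinned for the
 * duration so they cannot be reused mid-insert. If the insert fails
 * because the key changed underneath us, the attempt is counted in
 * writeback_keys_failed rather than writeback_keys_done and the data
 * simply stays dirty in the cache. Finally the keybuf slot and the
 * in_flight semaphore slot are released.
 */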
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
if (bio->bi_status) {
SET_KEY_DIRTY(&w->key, false);
bch_count_backing_io_errors(io->dc, bio);
closure_put(&io->cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
/* Not our turn to write; wait for a write to complete */
closure_wait(&dc->writeback_ordering_wait, cl);
if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
 * Edge case: it happened in indeterminate order
 * relative to when we were added to the wait list.
closure_wake_up(&dc->writeback_ordering_wait);
continue_at(cl, write_dirty, io->dc->writeback_write_wq);
next_sequence = io->sequence + 1;
 * If we failed to read, we should not attempt to write to the
 * backing device. Instead, immediately go to write_dirty_finish
 * to clean up.
if (KEY_DIRTY(&w->key)) {
io->bio.bi_opf = REQ_OP_WRITE;
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
bio_set_dev(&io->bio, io->dc->bdev);
io->bio.bi_end_io = dirty_endio;
/* I/O request sent to backing device */
closure_bio_submit(io->dc->disk.c, &io->bio, cl);
atomic_set(&dc->writeback_sequence_next, next_sequence);
closure_wake_up(&dc->writeback_ordering_wait);
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
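/*
 * The sequence/ordering dance above makes writes reach the backing
 * device in the same order the dirty keys were collected by
 * read_dirty(): a closure whose turn has not come yet parks on
 * dc->writeback_ordering_wait and is re-queued once the previous
 * writer bumps writeback_sequence_next. A failed read clears KEY_DIRTY
 * via dirty_endio() above, so in that case the backing write is
 * skipped entirely and we go straight to write_dirty_finish() to clean
 * up.
 */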
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
bch_count_io_errors(io->dc->disk.c->cache,
bio->bi_status, 1,

closure_bio_submit(io->dc->disk.c, &io->bio, cl);
continue_at(cl, write_dirty, io->dc->writeback_write_wq);
static void read_dirty(struct cached_dev *dc)
BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
atomic_set(&dc->writeback_sequence_next, sequence);
next = bch_keybuf_next(&dc->writeback_keys);
!test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
 * Operations are only eligible to be combined
 * TODO: add a heuristic willing to fire a
 * certain amount of non-contiguous IO per pass,
if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
&START_KEY(&next->key)))
size += KEY_SIZE(&next->key);
} while ((next = bch_keybuf_next(&dc->writeback_keys)));
/* Now we have gathered a set of 1..5 keys to write back. */
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
w->private = io;
io->dc = dc;
io->sequence = sequence++;
io->bio.bi_opf = REQ_OP_READ;
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
io->bio.bi_end_io = read_dirty_endio;
if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
trace_bcache_writeback(&w->key);
down(&dc->in_flight);
closure_call(&io->cl, read_dirty_submit, NULL, &cl);
delay = writeback_delay(dc, size);
!test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
delay = writeback_delay(dc, 0);
kfree(w->private);
bch_keybuf_del(&dc->writeback_keys, w);
 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
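/*
 * Overall flow of read_dirty(): grab up to a handful of contiguous
 * dirty keys from dc->writeback_keys, read each extent from the cache
 * device, then hand the completed reads to write_dirty() on
 * dc->writeback_write_wq. The dc->in_flight semaphore (initialised to
 * 64 in bch_cached_dev_writeback_init() below) bounds the number of
 * outstanding writeback I/Os, and writeback_delay() paces each batch
 * according to the rate computed by the PI controller above.
 */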
struct bcache_device *d = c->devices[inode];
if (UUID_FLASH_ONLY(&c->uuids[inode]))
atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
stripe_offset = offset & (d->stripe_size - 1);
d->stripe_size - stripe_offset);
s = -s;
if (stripe >= d->nr_stripes)
d->stripe_sectors_dirty + stripe);
if (sectors_dirty == d->stripe_size) {
if (!test_bit(stripe, d->full_dirty_stripes))
set_bit(stripe, d->full_dirty_stripes);
if (test_bit(stripe, d->full_dirty_stripes))
clear_bit(stripe, d->full_dirty_stripes);
nr_sectors -= s;
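/*
 * Dirty-sector accounting is kept per stripe of the backing device:
 * the change is applied one stripe at a time, and a stripe whose dirty
 * count reaches d->stripe_size is flagged in full_dirty_stripes (and
 * unflagged as soon as it drops below that), which is what
 * refill_full_stripes() below consumes.
 */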
struct cached_dev *dc = container_of(buf,
BUG_ON(KEY_INODE(k) != dc->disk.id);
static void refill_full_stripes(struct cached_dev *dc)
struct keybuf *buf = &dc->writeback_keys;
stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
stripe = find_next_bit(dc->disk.full_dirty_stripes,
dc->disk.nr_stripes, stripe);
if (stripe == dc->disk.nr_stripes)
next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
dc->disk.nr_stripes, stripe);
buf->last_scanned = KEY(dc->disk.id,
stripe * dc->disk.stripe_size, 0);
bch_refill_keybuf(dc->disk.c, buf,
&KEY(dc->disk.id,
next_stripe * dc->disk.stripe_size, 0),
if (array_freelist_empty(&buf->freelist))
if (stripe == dc->disk.nr_stripes) {
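/*
 * refill_full_stripes() walks the full_dirty_stripes bitmap for runs
 * of completely dirty stripes and refills the keybuf only from those
 * ranges. It is used when dc->partial_stripes_expensive is set, so
 * that writeback tends to issue full-stripe writes to backing devices
 * where partial-stripe updates are costly (parity RAID, for example).
 */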
static bool refill_dirty(struct cached_dev *dc)
struct keybuf *buf = &dc->writeback_keys;
struct bkey start = KEY(dc->disk.id, 0, 0);
struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
 * make sure keybuf pos is inside the range for this disk - at bringup
if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
bkey_cmp(&buf->last_scanned, &end) > 0)
buf->last_scanned = start;
if (dc->partial_stripes_expensive) {
refill_full_stripes(dc);
if (array_freelist_empty(&buf->freelist))
start_pos = buf->last_scanned;
bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
if (bkey_cmp(&buf->last_scanned, &end) < 0)
 * If we get to the end start scanning again from the beginning, and
 * only scan up to where we initially started scanning from:
buf->last_scanned = start;
bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
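/*
 * refill_dirty() scans the btree for this device's dirty keys starting
 * from wherever the previous pass stopped, wrapping around to the start
 * of the device's key range if it reaches the end. The return value
 * answers "did we examine the entire key space?", which the writeback
 * thread below uses to decide whether the device can be marked clean
 * when nothing dirty was found.
 */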
struct cached_dev *dc = arg;
struct cache_set *c = dc->disk.c;
bch_ratelimit_reset(&dc->writeback_rate);
!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
down_write(&dc->writeback_lock);
 * to perform writeback. Otherwise, if there is no dirty data in the cache,
 * to wake it up.
if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
(!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
up_write(&dc->writeback_lock);
test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
searched_full_index = refill_dirty(dc);
RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
atomic_set(&dc->has_dirty, 0);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
bch_write_bdev_super(dc, NULL);
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
bch_write_bdev_super(dc, &cl);
up_write(&dc->writeback_lock);
if (c->gc_after_writeback ==
c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
up_write(&dc->writeback_lock);
read_dirty(dc);
unsigned int delay = dc->writeback_delay * HZ;
!test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
bch_ratelimit_reset(&dc->writeback_rate);
if (dc->writeback_write_wq)
destroy_workqueue(dc->writeback_write_wq);
cached_dev_put(dc);
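/*
 * A sketch of the writeback thread's main loop, as far as the excerpt
 * shows it: take writeback_lock and refill the keybuf with dirty keys;
 * if the whole index was searched and nothing dirty remains, clear
 * has_dirty, write the backing superblock as BDEV_STATE_CLEAN (or
 * BDEV_STATE_NONE with the set UUID wiped when detaching) and possibly
 * kick the auto GC described near the top of the file; then drop the
 * lock, write back whatever was found via read_dirty(), and, once the
 * full index has been searched, sleep for writeback_delay seconds
 * (interruptible by detach or shutdown) before the next pass.
 */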
if (KEY_INODE(k) > op->inode)
bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
op->count++;
if (!(op->count % INIT_KEYS_EACH_TIME))

bch_btree_op_init(&op.op, -1);
op.inode = d->id;
c->root,
 * The op may be added to cache_set's btree_cache_wait
finish_wait(&c->btree_cache_wait, &(&op.op)->wait);
struct bch_dirty_init_state *state = info->state;
struct cache_set *c = state->c;
bch_btree_iter_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
spin_lock(&state->idx_lock);
cur_idx = state->key_idx;
state->key_idx++;
spin_unlock(&state->idx_lock);
skip_nr = cur_idx - prev_idx;
&c->root->keys,
atomic_set(&state->enough, 1);
/* Update state->enough earlier */
skip_nr--;
if (bch_root_node_dirty_init(c, state->d, p) < 0)
/* In order to wake up state->wait in time */
if (atomic_dec_and_test(&state->started))
wake_up(&state->wait);
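/*
 * The logic above lets several bch_dirty_init threads split the
 * top-level btree node between them: each thread claims the next key
 * index under state->idx_lock, skips forward to it in its own
 * iterator, and initialises dirty accounting for that subtree via
 * bch_root_node_dirty_init(). state->enough is set once the keys run
 * out so the other threads stop early, and the last thread to finish
 * wakes whoever is waiting on state->wait.
 */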
struct cache_set *c = d->c;
rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
for_each_key_filter(&c->root->keys,
sectors_dirty_init_fn(&op.op, c->root, k);
rw_unlock(0, c->root);
pr_err("fails to run thread bch_dirty_init[%d]\n", i);
for (--i; i >= 0; i--)
/* Must wait for all threads to stop. */
rw_unlock(0, c->root);
void bch_cached_dev_writeback_init(struct cached_dev *dc)
sema_init(&dc->in_flight, 64);
init_rwsem(&dc->writeback_lock);
bch_keybuf_init(&dc->writeback_keys);
dc->writeback_metadata = true;
dc->writeback_running = false;
dc->writeback_consider_fragment = true;
dc->writeback_percent = 10;
dc->writeback_delay = 30;
atomic_long_set(&dc->writeback_rate.rate, 1024);
dc->writeback_rate_minimum = 8;
dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
dc->writeback_rate_p_term_inverse = 40;
dc->writeback_rate_fp_term_low = 1;
dc->writeback_rate_fp_term_mid = 10;
dc->writeback_rate_fp_term_high = 1000;
dc->writeback_rate_i_term_inverse = 10000;
/* For dc->writeback_lock contention in update_writeback_rate() */
dc->rate_update_retry = 0;
WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
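/*
 * A note on the defaults above, assuming the rate is expressed in
 * sectors per second (which is how bch_next_delay() treats it): the
 * initial rate of 1024 is 512 KiB/s, the floor of 8 is 4 KiB/s, the
 * controller aims to hold dirty data at about 10% of this device's
 * share of the cache, and the p/i term inverses of 40 and 10000 make
 * it react quickly to the current error but only slowly to the
 * accumulated history.
 */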
int bch_cached_dev_writeback_start(struct cached_dev *dc)
dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
if (!dc->writeback_write_wq)
return -ENOMEM;
cached_dev_get(dc);
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
if (IS_ERR(dc->writeback_thread)) {
cached_dev_put(dc);
destroy_workqueue(dc->writeback_write_wq);
return PTR_ERR(dc->writeback_thread);
dc->writeback_running = true;
WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
bch_writeback_queue(dc);
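/*
 * bch_cached_dev_writeback_start() wires everything up at attach time:
 * it creates the writeback write workqueue and the writeback kthread
 * (taking a reference on dc so the device cannot go away underneath
 * them), arms the periodic rate-update work, and finally calls
 * bch_writeback_queue() to let the newly created thread start running.
 */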