Lines Matching refs:dc (uses of the struct cached_dev pointer dc in drivers/md/bcache/writeback.c)
30 static uint64_t __calc_target_rate(struct cached_dev *dc) in __calc_target_rate() argument
32 struct cache_set *c = dc->disk.c; in __calc_target_rate()
48 div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT, in __calc_target_rate()
52 div_u64(cache_sectors * dc->writeback_percent, 100); in __calc_target_rate()
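Lines 30-52 trace __calc_target_rate(): the dirty-data target for one backing device is its proportional share of the cache set's usable sectors, weighted by the device's size and scaled by writeback_percent. A minimal userspace model of that arithmetic, assuming the fixed-point share scheme the WRITEBACK_SHARE_SHIFT shift suggests (the constant's value here is illustrative):

    #include <stdint.h>

    #define WRITEBACK_SHARE_SHIFT 14        /* assumed fixed-point shift */

    /* Model of __calc_target_rate(): each backing device gets a slice of
     * the cache's dirty budget proportional to its size. Assumes
     * cached_dev_sectors (total size of all backing devices) is nonzero. */
    static uint64_t calc_target(uint64_t bdev_sectors,
                                uint64_t cached_dev_sectors,
                                uint64_t cache_sectors,
                                unsigned int writeback_percent)
    {
            /* this device's fraction of all backing storage, fixed point */
            uint64_t share = (bdev_sectors << WRITEBACK_SHARE_SHIFT) /
                             cached_dev_sectors;
            /* dirty budget for the whole cache set */
            uint64_t budget = cache_sectors * writeback_percent / 100;

            if (share < 1)
                    share = 1;      /* every device gets at least one share */

            return (budget * share) >> WRITEBACK_SHARE_SHIFT;
    }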
61 static void __update_writeback_rate(struct cached_dev *dc) in __update_writeback_rate() argument
83 int64_t target = __calc_target_rate(dc); in __update_writeback_rate()
84 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); in __update_writeback_rate()
87 div_s64(error, dc->writeback_rate_p_term_inverse); in __update_writeback_rate()
91 if ((error < 0 && dc->writeback_rate_integral > 0) || in __update_writeback_rate()
93 dc->writeback_rate.next + NSEC_PER_MSEC))) { in __update_writeback_rate()
104 dc->writeback_rate_integral += error * in __update_writeback_rate()
105 dc->writeback_rate_update_seconds; in __update_writeback_rate()
108 integral_scaled = div_s64(dc->writeback_rate_integral, in __update_writeback_rate()
109 dc->writeback_rate_i_term_inverse); in __update_writeback_rate()
112 dc->writeback_rate_minimum, NSEC_PER_SEC); in __update_writeback_rate()
114 dc->writeback_rate_proportional = proportional_scaled; in __update_writeback_rate()
115 dc->writeback_rate_integral_scaled = integral_scaled; in __update_writeback_rate()
116 dc->writeback_rate_change = new_rate - in __update_writeback_rate()
117 atomic_long_read(&dc->writeback_rate.rate); in __update_writeback_rate()
118 atomic_long_set(&dc->writeback_rate.rate, new_rate); in __update_writeback_rate()
119 dc->writeback_rate_target = target; in __update_writeback_rate()
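Lines 83-119 together form a PI controller: error = dirty - target, a proportional term divided by writeback_rate_p_term_inverse, an integral accumulated once per update period and divided by writeback_rate_i_term_inverse, and the sum clamped between writeback_rate_minimum and NSEC_PER_SEC. A self-contained sketch of one controller step; the real anti-windup test compares local_clock() against the rate limiter's next deadline, abstracted here as device_keeping_up:

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000LL

    struct wb_pi {
            int64_t integral;         /* writeback_rate_integral */
            int64_t p_term_inverse;   /* e.g. 40 */
            int64_t i_term_inverse;   /* e.g. 10000 */
            int64_t update_seconds;   /* controller period */
            int64_t rate_minimum;     /* floor, in sectors/sec */
    };

    /* One step: returns the new writeback rate in sectors/sec. */
    static int64_t wb_pi_update(struct wb_pi *pi, int64_t dirty,
                                int64_t target, int device_keeping_up)
    {
            int64_t error = dirty - target;
            int64_t p = error / pi->p_term_inverse;
            int64_t i, rate;

            /* Anti-windup: only shrink the integral while it is positive,
             * and only grow it while the device keeps up with the current
             * rate; otherwise it would wind up ineffectively. */
            if ((error < 0 && pi->integral > 0) ||
                (error > 0 && device_keeping_up))
                    pi->integral += error * pi->update_seconds;

            i = pi->integral / pi->i_term_inverse;

            rate = p + i;
            if (rate < pi->rate_minimum)
                    rate = pi->rate_minimum;
            if (rate > NSEC_PER_SEC)
                    rate = NSEC_PER_SEC;
            return rate;
    }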
123 struct cached_dev *dc) in set_at_max_writeback_rate() argument
152 atomic_long_set(&dc->writeback_rate.rate, INT_MAX); in set_at_max_writeback_rate()
155 dc->writeback_rate_proportional = 0; in set_at_max_writeback_rate()
156 dc->writeback_rate_integral_scaled = 0; in set_at_max_writeback_rate()
157 dc->writeback_rate_change = 0; in set_at_max_writeback_rate()
175 struct cached_dev *dc = container_of(to_delayed_work(work), in update_writeback_rate() local
178 struct cache_set *c = dc->disk.c; in update_writeback_rate()
184 set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); in update_writeback_rate()
192 if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) || in update_writeback_rate()
194 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); in update_writeback_rate()
200 if (atomic_read(&dc->has_dirty) && dc->writeback_percent) { in update_writeback_rate()
207 if (!set_at_max_writeback_rate(c, dc)) { in update_writeback_rate()
208 down_read(&dc->writeback_lock); in update_writeback_rate()
209 __update_writeback_rate(dc); in update_writeback_rate()
211 up_read(&dc->writeback_lock); in update_writeback_rate()
220 if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) && in update_writeback_rate()
222 schedule_delayed_work(&dc->writeback_rate_update, in update_writeback_rate()
223 dc->writeback_rate_update_seconds * HZ); in update_writeback_rate()
230 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); in update_writeback_rate()
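Reassembled from the references above, the delayed-work handler looks roughly like this (kernel context assumed; the memory barriers around the RATE_DW_RUNNING handshake and some teardown details are elided):

    static void update_writeback_rate(struct work_struct *work)
    {
            struct cached_dev *dc = container_of(to_delayed_work(work),
                                                 struct cached_dev,
                                                 writeback_rate_update);
            struct cache_set *c = dc->disk.c;

            /* Handshake with teardown: advertise that the work is live. */
            set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);

            if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
                test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
                    clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
                    return;
            }

            if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
                    /* Run the PI controller only when not pinned at the
                     * maximum rate for an idle cache set. */
                    if (!set_at_max_writeback_rate(c, dc)) {
                            down_read(&dc->writeback_lock);
                            __update_writeback_rate(dc);
                            up_read(&dc->writeback_lock);
                    }
            }

            /* Re-arm for the next period while writeback stays enabled. */
            if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
                !test_bit(CACHE_SET_IO_DISABLE, &c->flags))
                    schedule_delayed_work(&dc->writeback_rate_update,
                                          dc->writeback_rate_update_seconds * HZ);

            clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
    }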
235 static unsigned int writeback_delay(struct cached_dev *dc, in writeback_delay() argument
238 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || in writeback_delay()
239 !dc->writeback_percent) in writeback_delay()
242 return bch_next_delay(&dc->writeback_rate, sectors); in writeback_delay()
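Lines 238-242 are effectively the whole helper: flush at full speed while detaching or when writeback_percent is zero, otherwise let the rate limiter compute the sleep. Reassembled:

    static unsigned int writeback_delay(struct cached_dev *dc,
                                        unsigned int sectors)
    {
            /* No throttling while detaching, or when the user disabled
             * the dirty-percent target. */
            if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
                !dc->writeback_percent)
                    return 0;

            /* Ask the token-bucket limiter how long to wait for this
             * many sectors at the current rate. */
            return bch_next_delay(&dc->writeback_rate, sectors);
    }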
247 struct cached_dev *dc; in struct dirty_io member
259 if (!io->dc->writeback_percent) in dirty_init()
278 struct cached_dev *dc = io->dc; in write_dirty_finish() local
295 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); in write_dirty_finish()
297 ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key); in write_dirty_finish()
303 ? &dc->disk.c->writeback_keys_failed in write_dirty_finish()
304 : &dc->disk.c->writeback_keys_done); in write_dirty_finish()
307 bch_keybuf_del(&dc->writeback_keys, w); in write_dirty_finish()
308 up(&dc->in_flight); in write_dirty_finish()
320 bch_count_backing_io_errors(io->dc, bio); in dirty_endio()
330 struct cached_dev *dc = io->dc; in write_dirty() local
334 if (atomic_read(&dc->writeback_sequence_next) != io->sequence) { in write_dirty()
336 closure_wait(&dc->writeback_ordering_wait, cl); in write_dirty()
338 if (atomic_read(&dc->writeback_sequence_next) == io->sequence) { in write_dirty()
343 closure_wake_up(&dc->writeback_ordering_wait); in write_dirty()
346 continue_at(cl, write_dirty, io->dc->writeback_write_wq); in write_dirty()
362 bio_set_dev(&io->bio, io->dc->bdev); in write_dirty()
366 closure_bio_submit(io->dc->disk.c, &io->bio, cl); in write_dirty()
369 atomic_set(&dc->writeback_sequence_next, next_sequence); in write_dirty()
370 closure_wake_up(&dc->writeback_ordering_wait); in write_dirty()
372 continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq); in write_dirty()
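Lines 334-372 show how write_dirty() keeps backing-device writes in key order: each dirty_io carries a sequence number; an I/O whose turn has not come parks on writeback_ordering_wait (re-checking afterwards to close the lost-wakeup race), while the I/O that finishes its turn advances writeback_sequence_next and wakes the waiters. A userspace model of the same ticket discipline (names hypothetical):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  turn = PTHREAD_COND_INITIALIZER;
    static uint16_t sequence_next;      /* models writeback_sequence_next */

    /* Block until it is this ticket's turn, perform the write, then pass
     * the turn on and wake all waiters; each re-checks its own ticket,
     * just as each parked closure re-checks its sequence. */
    static void ordered_write(uint16_t my_sequence,
                              void (*do_write)(void *), void *arg)
    {
            pthread_mutex_lock(&lock);
            while (sequence_next != my_sequence)
                    pthread_cond_wait(&turn, &lock);
            pthread_mutex_unlock(&lock);

            do_write(arg);      /* submit the backing-device write */

            pthread_mutex_lock(&lock);
            sequence_next = (uint16_t)(my_sequence + 1);
            pthread_cond_broadcast(&turn);      /* closure_wake_up() */
            pthread_mutex_unlock(&lock);
    }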
381 bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), in read_dirty_endio()
392 closure_bio_submit(io->dc->disk.c, &io->bio, cl); in read_dirty_submit()
394 continue_at(cl, write_dirty, io->dc->writeback_write_wq); in read_dirty_submit()
397 static void read_dirty(struct cached_dev *dc) in read_dirty() argument
407 BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list)); in read_dirty()
408 atomic_set(&dc->writeback_sequence_next, sequence); in read_dirty()
416 next = bch_keybuf_next(&dc->writeback_keys); in read_dirty()
419 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && in read_dirty()
425 BUG_ON(ptr_stale(dc->disk.c, &next->key, 0)); in read_dirty()
456 } while ((next = bch_keybuf_next(&dc->writeback_keys))); in read_dirty()
469 io->dc = dc; in read_dirty()
476 PTR_CACHE(dc->disk.c, &w->key, 0)->bdev); in read_dirty()
484 down(&dc->in_flight); in read_dirty()
494 delay = writeback_delay(dc, size); in read_dirty()
497 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && in read_dirty()
500 delay = writeback_delay(dc, 0); in read_dirty()
508 bch_keybuf_del(&dc->writeback_keys, w); in read_dirty()
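read_dirty() paces itself two ways: down(&dc->in_flight) caps concurrent writeback I/Os (the semaphore is initialized to 64 at line 965 below), and writeback_delay() spaces submissions to the controller's rate. A loose userspace model of that producer loop (hypothetical names; completion would sem_post() the slot that write_dirty_finish() releases with up()):

    #include <semaphore.h>
    #include <unistd.h>

    static sem_t in_flight;     /* sem_init(&in_flight, 0, 64) at setup */

    /* Submit one writeback I/O: honor the rate limiter's delay, then
     * take an in-flight slot; the completion handler posts it back. */
    static void submit_paced(void (*submit)(void *), void *io,
                             unsigned int delay_ms)
    {
            if (delay_ms)
                    usleep(delay_ms * 1000);    /* writeback_delay() sleep */

            sem_wait(&in_flight);               /* down(&dc->in_flight) */
            submit(io);                         /* read, then ordered write */
    }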
564 struct cached_dev *dc = container_of(buf, in dirty_pred() local
568 BUG_ON(KEY_INODE(k) != dc->disk.id); in dirty_pred()
573 static void refill_full_stripes(struct cached_dev *dc) in refill_full_stripes() argument
575 struct keybuf *buf = &dc->writeback_keys; in refill_full_stripes()
580 stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); in refill_full_stripes()
587 stripe = find_next_bit(dc->disk.full_dirty_stripes, in refill_full_stripes()
588 dc->disk.nr_stripes, stripe); in refill_full_stripes()
590 if (stripe == dc->disk.nr_stripes) in refill_full_stripes()
593 next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes, in refill_full_stripes()
594 dc->disk.nr_stripes, stripe); in refill_full_stripes()
596 buf->last_scanned = KEY(dc->disk.id, in refill_full_stripes()
597 stripe * dc->disk.stripe_size, 0); in refill_full_stripes()
599 bch_refill_keybuf(dc->disk.c, buf, in refill_full_stripes()
600 &KEY(dc->disk.id, in refill_full_stripes()
601 next_stripe * dc->disk.stripe_size, 0), in refill_full_stripes()
612 if (stripe == dc->disk.nr_stripes) { in refill_full_stripes()
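refill_full_stripes() walks the full_dirty_stripes bitmap in runs: find_next_bit() locates the next completely dirty stripe, find_next_zero_bit() the end of that run, and the keybuf is refilled over that contiguous stripe range (the wrap from last_scanned and the keybuf-full early exit are elided here). A plain-C model of the run enumeration, with the bitmap as a bool array for clarity:

    #include <stdbool.h>
    #include <stddef.h>

    /* Enumerate maximal runs [start, end) of set bits, as the
     * find_next_bit()/find_next_zero_bit() pair does; refill() stands in
     * for bch_refill_keybuf() over stripes [start, end). */
    static void for_each_full_run(const bool *full, size_t nr_stripes,
                                  void (*refill)(size_t start, size_t end))
    {
            size_t stripe = 0;

            while (stripe < nr_stripes) {
                    size_t next;

                    while (stripe < nr_stripes && !full[stripe])
                            stripe++;           /* find_next_bit() */
                    if (stripe == nr_stripes)
                            break;

                    next = stripe;
                    while (next < nr_stripes && full[next])
                            next++;             /* find_next_zero_bit() */

                    refill(stripe, next);
                    stripe = next;
            }
    }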
622 static bool refill_dirty(struct cached_dev *dc) in refill_dirty() argument
624 struct keybuf *buf = &dc->writeback_keys; in refill_dirty()
625 struct bkey start = KEY(dc->disk.id, 0, 0); in refill_dirty()
626 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0); in refill_dirty()
638 if (dc->partial_stripes_expensive) { in refill_dirty()
639 refill_full_stripes(dc); in refill_dirty()
645 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); in refill_dirty()
655 bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred); in refill_dirty()
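refill_dirty() resumes from buf->last_scanned, scans to the end of this device's keyspace, and only if that pass ran out of keys before filling the buffer does it wrap to the start and scan back up to the original position; returning true therefore means a full index traversal. A sketch of that control flow (kernel context assumed; the last_scanned clamping and the partial-stripes branch above are elided):

    static bool refill_dirty(struct cached_dev *dc)
    {
            struct keybuf *buf = &dc->writeback_keys;
            struct bkey start = KEY(dc->disk.id, 0, 0);
            struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
            struct bkey start_pos = buf->last_scanned;

            /* First pass: from the previous stopping point to the end. */
            bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

            if (bkey_cmp(&buf->last_scanned, &end) < 0)
                    return false;       /* buffer filled before the end */

            /* Wrap: scan from the start back up to where we began, so a
             * true return covers the whole keyspace. */
            buf->last_scanned = start;
            bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

            return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
    }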
662 struct cached_dev *dc = arg; in bch_writeback_thread() local
663 struct cache_set *c = dc->disk.c; in bch_writeback_thread()
666 bch_ratelimit_reset(&dc->writeback_rate); in bch_writeback_thread()
670 down_write(&dc->writeback_lock); in bch_writeback_thread()
679 if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && in bch_writeback_thread()
680 (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { in bch_writeback_thread()
681 up_write(&dc->writeback_lock); in bch_writeback_thread()
694 searched_full_index = refill_dirty(dc); in bch_writeback_thread()
697 RB_EMPTY_ROOT(&dc->writeback_keys.keys)) { in bch_writeback_thread()
698 atomic_set(&dc->has_dirty, 0); in bch_writeback_thread()
699 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); in bch_writeback_thread()
700 bch_write_bdev_super(dc, NULL); in bch_writeback_thread()
707 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) { in bch_writeback_thread()
708 up_write(&dc->writeback_lock); in bch_writeback_thread()
730 up_write(&dc->writeback_lock); in bch_writeback_thread()
732 read_dirty(dc); in bch_writeback_thread()
735 unsigned int delay = dc->writeback_delay * HZ; in bch_writeback_thread()
740 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) in bch_writeback_thread()
743 bch_ratelimit_reset(&dc->writeback_rate); in bch_writeback_thread()
747 if (dc->writeback_write_wq) { in bch_writeback_thread()
748 flush_workqueue(dc->writeback_write_wq); in bch_writeback_thread()
749 destroy_workqueue(dc->writeback_write_wq); in bch_writeback_thread()
751 cached_dev_put(dc); in bch_writeback_thread()
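The thread's main loop, as the references outline it: take writeback_lock, sleep when there is nothing to do, refill the keybuf, mark the backing device clean when a full traversal found no dirty keys, then drop the lock and issue I/O via read_dirty(). A condensed skeleton (kernel context assumed; the detach path, CACHE_SET_IO_DISABLE checks, and signal handling are elided):

    static int bch_writeback_thread(void *arg)
    {
            struct cached_dev *dc = arg;
            bool searched_full_index;

            bch_ratelimit_reset(&dc->writeback_rate);

            while (!kthread_should_stop()) {
                    down_write(&dc->writeback_lock);

                    if (!atomic_read(&dc->has_dirty) ||
                        !dc->writeback_running) {
                            up_write(&dc->writeback_lock);
                            set_current_state(TASK_INTERRUPTIBLE);
                            schedule();     /* woken by bch_writeback_queue() */
                            continue;
                    }

                    searched_full_index = refill_dirty(dc);

                    /* A full traversal with no dirty keys left: persist
                     * the clean state in the backing superblock. */
                    if (searched_full_index &&
                        RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
                            atomic_set(&dc->has_dirty, 0);
                            SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
                            bch_write_bdev_super(dc, NULL);
                    }

                    up_write(&dc->writeback_lock);

                    read_dirty(dc);         /* reads, then ordered writes */

                    if (searched_full_index) {
                            unsigned int delay = dc->writeback_delay * HZ;

                            while (delay && !kthread_should_stop())
                                    delay = schedule_timeout_interruptible(delay);

                            bch_ratelimit_reset(&dc->writeback_rate);
                    }
            }

            /* Drain and tear down the write workqueue, then drop the
             * reference taken at start. */
            if (dc->writeback_write_wq) {
                    flush_workqueue(dc->writeback_write_wq);
                    destroy_workqueue(dc->writeback_write_wq);
            }
            cached_dev_put(dc);
            return 0;
    }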
963 void bch_cached_dev_writeback_init(struct cached_dev *dc) in bch_cached_dev_writeback_init() argument
965 sema_init(&dc->in_flight, 64); in bch_cached_dev_writeback_init()
966 init_rwsem(&dc->writeback_lock); in bch_cached_dev_writeback_init()
967 bch_keybuf_init(&dc->writeback_keys); in bch_cached_dev_writeback_init()
969 dc->writeback_metadata = true; in bch_cached_dev_writeback_init()
970 dc->writeback_running = false; in bch_cached_dev_writeback_init()
971 dc->writeback_percent = 10; in bch_cached_dev_writeback_init()
972 dc->writeback_delay = 30; in bch_cached_dev_writeback_init()
973 atomic_long_set(&dc->writeback_rate.rate, 1024); in bch_cached_dev_writeback_init()
974 dc->writeback_rate_minimum = 8; in bch_cached_dev_writeback_init()
976 dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT; in bch_cached_dev_writeback_init()
977 dc->writeback_rate_p_term_inverse = 40; in bch_cached_dev_writeback_init()
978 dc->writeback_rate_i_term_inverse = 10000; in bch_cached_dev_writeback_init()
980 WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); in bch_cached_dev_writeback_init()
981 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); in bch_cached_dev_writeback_init()
984 int bch_cached_dev_writeback_start(struct cached_dev *dc) in bch_cached_dev_writeback_start() argument
986 dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq", in bch_cached_dev_writeback_start()
988 if (!dc->writeback_write_wq) in bch_cached_dev_writeback_start()
991 cached_dev_get(dc); in bch_cached_dev_writeback_start()
992 dc->writeback_thread = kthread_create(bch_writeback_thread, dc, in bch_cached_dev_writeback_start()
994 if (IS_ERR(dc->writeback_thread)) { in bch_cached_dev_writeback_start()
995 cached_dev_put(dc); in bch_cached_dev_writeback_start()
996 destroy_workqueue(dc->writeback_write_wq); in bch_cached_dev_writeback_start()
997 return PTR_ERR(dc->writeback_thread); in bch_cached_dev_writeback_start()
999 dc->writeback_running = true; in bch_cached_dev_writeback_start()
1001 WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); in bch_cached_dev_writeback_start()
1002 schedule_delayed_work(&dc->writeback_rate_update, in bch_cached_dev_writeback_start()
1003 dc->writeback_rate_update_seconds * HZ); in bch_cached_dev_writeback_start()
1005 bch_writeback_queue(dc); in bch_cached_dev_writeback_start()
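Start-up order matters here: the workqueue exists before the thread that queues onto it, the thread holds a reference on dc that it drops on exit, and the periodic rate update is armed only once BCACHE_DEV_WB_RUNNING is set. Reassembled from the references above (allocation flags and thread name assumed):

    int bch_cached_dev_writeback_start(struct cached_dev *dc)
    {
            /* Workqueue for the write_dirty()/write_dirty_finish() stages. */
            dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
                                                     WQ_MEM_RECLAIM, 0);
            if (!dc->writeback_write_wq)
                    return -ENOMEM;

            cached_dev_get(dc);     /* dropped by the thread on exit */
            dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
                                                  "bcache_writeback");
            if (IS_ERR(dc->writeback_thread)) {
                    cached_dev_put(dc);
                    destroy_workqueue(dc->writeback_write_wq);
                    return PTR_ERR(dc->writeback_thread);
            }
            dc->writeback_running = true;

            WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
            schedule_delayed_work(&dc->writeback_rate_update,
                                  dc->writeback_rate_update_seconds * HZ);

            bch_writeback_queue(dc);

            return 0;
    }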