Lines matching "lock", "latency", "ns" in block/blk-iolatency.c ("..." marks elided source lines)

// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 * ...
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual disk.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window. This is because writes can
 *   ...
 * - By default there's no throttling; we set the queue_depth to UINT_MAX so
 *   ...
 *
 * The hierarchy works like the cpu controller does: we track the latency at
 * ...
 * queue depth. This means that we only care about our latency targets at the
 * peer level.
 * ...
 * ... an average latency of 5ms. If it does, then we will throttle the "slow"
 * group.
 * ...
 * ... number of IOs we're allowed to have in flight. This starts at (u64)-1 down
 * ...
 * ... down more, then we induce a latency at userspace return. We accumulate the
 * ...
 *
 *   total_time += min_lat_nsec - actual_io_completion
 */
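To make that accounting concrete, here is a minimal userspace sketch (not the kernel code; the names and values are illustrative) of how completions that finish faster than the target accumulate delay that is later charged back at userspace return:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t min_lat_nsec = 5000000;        /* hypothetical 5ms target */
        uint64_t completions[] = { 2000000, 4000000, 6000000 };
        uint64_t total_time = 0;

        for (int i = 0; i < 3; i++) {
                uint64_t actual_io_completion = completions[i];

                /* only IOs that beat the target add throttle time */
                if (actual_io_completion < min_lat_nsec)
                        total_time += min_lat_nsec - actual_io_completion;
        }
        /* 3ms + 1ms + 0 = 4ms of induced delay */
        printf("induced delay: %llu ns\n", (unsigned long long)total_time);
        return 0;
}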
#include <linux/backing-dev.h>
...
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

/* from blk_iolatency_enabled(): */
return atomic_read(&blkiolat->enabled) > 0;

/* from struct child_latency_info: */
spinlock_t lock;
...
/* The latency that we missed. */
u64 scale_lat;
...
/* The group that actually changed the latency numbers. */
struct iolatency_grp *scale_grp;

/* from struct iolatency_grp: */
/* total running average of our io latency. */
u64 lat_avg;

/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average. The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.
 * ...
 */
...
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80)  - 80 samples
        2014, // exp(1/60)  - 60 samples
};
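The update these factors feed is the same fixed-point EWMA the kernel uses for load average. The sketch below (userspace, ignoring calc_load()'s round-up detail) shows how one window's mean folds into lat_avg:

#include <stdint.h>
#include <stdio.h>

#define FIXED_1 (1 << 11)       /* 2048, the fixed-point "1.0" */

/* avg' = (avg * f + sample * (FIXED_1 - f)) / FIXED_1 */
static uint64_t ewma(uint64_t avg, uint64_t exp_factor, uint64_t sample)
{
        return (avg * exp_factor + sample * (FIXED_1 - exp_factor)) / FIXED_1;
}

int main(void)
{
        uint64_t avg = 0;

        /* feed sixty 1ms window means through the 60-sample factor (2014) */
        for (int i = 0; i < 60; i++)
                avg = ewma(avg, 2014, 1000000);
        printf("lat_avg after 60 windows: %llu ns\n",
               (unsigned long long)avg);
        return 0;
}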

/* from lat_to_blkg(): */
return pd_to_blkg(&iolat->pd);

/* from latency_stat_init(): */
if (iolat->ssd) {
        stat->ps.total = 0;
        stat->ps.missed = 0;
} else
        blk_rq_stat_init(&stat->rqs);

/* from latency_stat_sum(): */
if (iolat->ssd) {
        sum->ps.total += stat->ps.total;
        sum->ps.missed += stat->ps.missed;
} else
        blk_rq_stat_sum(&sum->rqs, &stat->rqs);

/* from latency_stat_record_time(): */
struct latency_stat *stat = get_cpu_ptr(iolat->stats);

if (iolat->ssd) {
        if (req_time >= iolat->min_lat_nsec)
                stat->ps.missed++;
        stat->ps.total++;
} else
        blk_rq_stat_add(&stat->rqs, req_time);

/* from latency_sum_ok(): */
if (iolat->ssd) {
        u64 thresh = div64_u64(stat->ps.total, 10);

        thresh = max(thresh, 1ULL);
        return stat->ps.missed < thresh;
}
return stat->rqs.mean <= iolat->min_lat_nsec;
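Put plainly: on SSDs a window passes if fewer than a tenth of its IOs missed the target, while on rotational storage the window's mean must meet the target. A compilable sketch of that decision (the types here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct window {
        bool ssd;
        uint64_t total, missed; /* SSD: per-window counters */
        uint64_t mean_nsec;     /* rotational: per-window mean latency */
};

static bool window_ok(const struct window *w, uint64_t target_nsec)
{
        if (w->ssd)
                /* < 10% missed (the kernel also clamps the threshold to at least 1) */
                return w->missed < w->total / 10;
        return w->mean_nsec <= target_nsec;
}

int main(void)
{
        struct window w = { .ssd = true, .total = 100, .missed = 9 };

        printf("%s\n", window_ok(&w, 5000000) ? "ok" : "throttle");
        return 0;
}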

/* from latency_stat_samples(): */
if (iolat->ssd)
        return stat->ps.total;
return stat->rqs.nr_samples;

/* from iolat_update_total_lat_avg(): */
if (iolat->ssd)
        return;
/*
 * ...
 * Because we are using this for IO time in ns, the values stored
 * ...
 */
exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                div64_u64(iolat->cur_win_nsec,
                          BLKIOLATENCY_EXP_BUCKET_SIZE));
iolat->lat_avg = calc_load(iolat->lat_avg,
                           iolatency_exp_factors[exp_idx],
                           stat->rqs.mean);

/* from iolat_cleanup_cb(): */
atomic_dec(&rqw->inflight);
wake_up(&rqw->wait);

/* from iolat_acquire_inflight(): */
return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);

/* from __blkcg_iolatency_throttle(): */
struct rq_wait *rqw = &iolat->rq_wait;
unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
...
blkcg_schedule_throttle(rqos->q, use_memdelay);
...
atomic_inc(&rqw->inflight);

/* from scale_cookie_change(): */
unsigned long qd = blkiolat->rqos.q->nr_requests;
...
unsigned long old = atomic_read(&lat_info->scale_cookie);
...
diff = DEFAULT_SCALE_COOKIE - old;
...
atomic_set(&lat_info->scale_cookie, ...);
...
atomic_inc(&lat_info->scale_cookie);
...
atomic_add(scale, &lat_info->scale_cookie);
...
atomic_dec(&lat_info->scale_cookie);
...
atomic_sub(scale, &lat_info->scale_cookie);

/* from scale_change(): */
unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
...
unsigned long old = iolat->rq_depth.max_depth;
...
iolat->rq_depth.max_depth = old;
wake_up_all(&iolat->rq_wait.wait);
...
iolat->rq_depth.max_depth = max(old, 1UL);

/* from check_scale_change(): */
unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
...
if (lat_to_blkg(iolat)->parent == NULL)
        return;
parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
...
lat_info = &parent->child_lat;
cur_cookie = atomic_read(&lat_info->scale_cookie);
scale_lat = READ_ONCE(lat_info->scale_lat);
...
if (cur_cookie < our_cookie)
        direction = -1;
...
old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
...
if (direction < 0 && iolat->min_lat_nsec) {
        ...
        if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                return;
        ...
        samples_thresh = lat_info->nr_samples * 5;
        ...
        if (iolat->nr_samples <= samples_thresh)
                return;
}
...
if (iolat->rq_depth.max_depth == 1 && direction < 0) {
        ...
}
...
        iolat->rq_depth.max_depth = UINT_MAX;
        wake_up_all(&iolat->rq_wait.wait);
...
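The cookie handshake above can be summarized as: the parent publishes a single atomic scale_cookie, each child caches the last value it acted on, and the direction the cookie moved tells the child which way to scale. A hedged userspace sketch (the starting constant and the halve/double scaling steps are illustrative, not the kernel's exact step sizes):

#include <stdio.h>

#define DEFAULT_SCALE_COOKIE 1000000    /* illustrative starting value */

struct child {
        unsigned int cookie;    /* last parent cookie we acted on */
        unsigned int depth;     /* our queue depth */
};

static void check_scale(struct child *c, unsigned int parent_cookie)
{
        if (parent_cookie == c->cookie)
                return;         /* nothing changed since we last looked */

        if (parent_cookie < c->cookie)  /* cookie fell: throttle harder */
                c->depth = c->depth > 1 ? c->depth / 2 : 1;
        else                            /* cookie rose: pressure released */
                c->depth *= 2;

        c->cookie = parent_cookie;      /* consume this event exactly once */
}

int main(void)
{
        struct child c = { DEFAULT_SCALE_COOKIE, 128 };

        check_scale(&c, DEFAULT_SCALE_COOKIE - 1);      /* scale down */
        printf("depth now %u\n", c.depth);              /* 64 */
        return 0;
}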

/* from blkcg_iolatency_throttle(): */
struct blkcg_gq *blkg = bio->bi_blkg;
...
while (blkg && blkg->parent) {
        ...
                blkg = blkg->parent;
        ...
        __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                   (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
        blkg = blkg->parent;
}
if (!timer_pending(&blkiolat->timer))
        mod_timer(&blkiolat->timer, jiffies + HZ);

/* from iolatency_record_time(): */
req_time = now - start;
/*
 * We don't want to count issue_as_root bio's in the cgroup's latency
 * ...
 */
if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
        u64 sub = iolat->min_lat_nsec;

        if (req_time < sub)
                blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
        return;
}

/* from iolatency_check_latencies(): */
s = per_cpu_ptr(iolat->stats, cpu);
...
parent = blkg_to_lat(blkg->parent);
...
lat_info = &parent->child_lat;
...
if (latency_sum_ok(iolat, &stat) &&
    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
        return;

spin_lock_irqsave(&lat_info->lock, flags);
...
latency_stat_sum(iolat, &iolat->cur_stat, &stat);
lat_info->nr_samples -= iolat->nr_samples;
lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

if ((lat_info->last_scale_event >= now ||
     now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
        goto out;

if (latency_sum_ok(iolat, &iolat->cur_stat) &&
    latency_sum_ok(iolat, &stat)) {
        if (latency_stat_samples(iolat, &iolat->cur_stat) < ...)
                goto out;
        if (lat_info->scale_grp == iolat) {
                lat_info->last_scale_event = now;
                scale_cookie_change(iolat->blkiolat, lat_info, true);
        }
} else if (lat_info->scale_lat == 0 ||
           lat_info->scale_lat >= iolat->min_lat_nsec) {
        lat_info->last_scale_event = now;
        if (!lat_info->scale_grp ||
            lat_info->scale_lat > iolat->min_lat_nsec) {
                WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                lat_info->scale_grp = iolat;
        }
        scale_cookie_change(iolat->blkiolat, lat_info, false);
}
latency_stat_init(iolat, &iolat->cur_stat);
out:
spin_unlock_irqrestore(&lat_info->lock, flags);

/* from blkcg_iolatency_done_bio(): */
blkg = bio->bi_blkg;
...
iolat = blkg_to_lat(bio->bi_blkg);
...
enabled = blk_iolatency_enabled(iolat->blkiolat);
...
while (blkg && blkg->parent) {
        ...
                blkg = blkg->parent;
        ...
        rqw = &iolat->rq_wait;

        inflight = atomic_dec_return(&rqw->inflight);
        ...
        if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
                iolatency_record_time(iolat, &bio->bi_issue, now,
                                      issue_as_root);
                window_start = atomic64_read(&iolat->window_start);
                if (now > window_start &&
                    (now - window_start) >= iolat->cur_win_nsec) {
                        if (atomic64_cmpxchg(&iolat->window_start,
                                             window_start, now) == window_start)
                                iolatency_check_latencies(iolat, now);
                }
        }
        wake_up(&rqw->wait);
        blkg = blkg->parent;
}
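The window rollover in the loop above is a race that exactly one completion wins: whoever succeeds at the compare-and-swap on window_start processes the finished window. A standalone sketch of that idiom (C11 atomics standing in for the kernel's atomic64 ops):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t window_start;

/* returns true only for the single caller that closed the window */
static bool try_close_window(uint64_t now, uint64_t win_nsec)
{
        uint64_t start = atomic_load(&window_start);

        if (now - start < win_nsec)
                return false;           /* window still open */
        /* one winner swings window_start forward and checks latencies */
        return atomic_compare_exchange_strong(&window_start, &start, now);
}

int main(void)
{
        atomic_store(&window_start, 0);
        return try_close_window(200000000, 100000000) ? 0 : 1;
}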

/* from blkcg_iolatency_exit(): */
del_timer_sync(&blkiolat->timer);
blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);

/* from blkiolatency_timer_fn(): */
blkg_for_each_descendant_pre(blkg, pos_css,
                             blkiolat->rqos.q->root_blkg) {
        ...
        lat_info = &iolat->child_lat;
        cookie = atomic_read(&lat_info->scale_cookie);
        ...
        spin_lock_irqsave(&lat_info->lock, flags);
        if (lat_info->last_scale_event >= now)
                goto next;
        ...
        if (lat_info->scale_grp == NULL) {
                scale_cookie_change(iolat->blkiolat, lat_info, true);
                ...
        }
        ...
        if (now - lat_info->last_scale_event >= ...)
                lat_info->scale_grp = NULL;
next:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}

/* from blk_iolatency_init(): */
...
        return -ENOMEM;

rqos = &blkiolat->rqos;
rqos->id = RQ_QOS_LATENCY;
rqos->ops = &blkcg_iolatency_ops;
rqos->q = q;
...
timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

/*
 * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
 * return 0.
 */
/* from iolatency_set_min_lat_nsec(): */
u64 oldval = iolat->min_lat_nsec;

iolat->min_lat_nsec = val;
iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                            BLKIOLATENCY_MAX_WIN_SIZE);
...
        return -1;
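The two clamps above size the sampling window at sixteen times the target latency, bounded by the min/max window macros. A sketch under the assumption (suggested by the macro names elsewhere in the file, not confirmed here) that those bounds are 100ms and 1s:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL
#define MIN_WIN (100 * NSEC_PER_MSEC)   /* assumed BLKIOLATENCY_MIN_WIN_SIZE */
#define MAX_WIN (1000 * NSEC_PER_MSEC)  /* assumed BLKIOLATENCY_MAX_WIN_SIZE */

static uint64_t window_for_target(uint64_t target_nsec)
{
        uint64_t win = target_nsec << 4;        /* 16x the target */

        if (win < MIN_WIN)
                win = MIN_WIN;
        if (win > MAX_WIN)
                win = MAX_WIN;
        return win;
}

int main(void)
{
        /* a 5ms target wants an 80ms window, clamped up to 100ms */
        printf("%llu\n", (unsigned long long)
               window_for_target(5 * NSEC_PER_MSEC));
        return 0;
}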

/* from iolatency_clear_scaling(): */
if (blkg->parent) {
        struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
        ...
        lat_info = &iolat->child_lat;
        spin_lock(&lat_info->lock);
        atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
        lat_info->last_scale_event = 0;
        lat_info->scale_grp = NULL;
        lat_info->scale_lat = 0;
        spin_unlock(&lat_info->lock);
}

/* from iolatency_set_limit(): */
ret = -EINVAL;
...
oldval = iolat->min_lat_nsec;
...
if (!blk_get_queue(blkg->q)) {
        ret = -ENODEV;
        ...
}
...
if (oldval != iolat->min_lat_nsec) {
        ...
}
...
struct blk_iolatency *blkiolat = tmp->blkiolat;
...
blk_mq_freeze_queue(blkg->q);

if (enable == 1)
        atomic_inc(&blkiolat->enabled);
else if (enable == -1)
        atomic_dec(&blkiolat->enabled);
...
blk_mq_unfreeze_queue(blkg->q);
...
blk_put_queue(blkg->q);

/* from iolatency_prfill_limit(): */
const char *dname = blkg_dev_name(pd->blkg);
...
if (!dname || !iolat->min_lat_nsec)
        return 0;
seq_printf(sf, ...,
           dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));

/* from iolatency_print_limit(): */
blkcg_print_blkgs(sf, ...,
                  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
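For context, these helpers back the cgroup-v2 io.latency file, which takes and reports the target in microseconds. A hedged userspace sketch of configuring it (the cgroup path and device numbers are illustrative):

#include <stdio.h>

int main(void)
{
        /* illustrative cgroup path and MAJ:MIN device numbers */
        FILE *f = fopen("/sys/fs/cgroup/mygroup/io.latency", "w");

        if (!f)
                return 1;
        /* ask for a 5000us (5ms) latency target on device 8:0 */
        fprintf(f, "8:0 target=5000\n");
        return fclose(f) ? 1 : 0;
}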

/* from iolatency_ssd_stat(): */
s = per_cpu_ptr(iolat->stats, cpu);
...
if (iolat->rq_depth.max_depth == UINT_MAX)
        ...
...
... iolat->rq_depth.max_depth);

/* from iolatency_pd_stat(): */
if (iolat->ssd)
        ...
avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
if (iolat->rq_depth.max_depth == UINT_MAX)
        ...
...
... iolat->rq_depth.max_depth, avg_lat, cur_win);

/* from iolatency_pd_alloc(): */
iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
...
iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                  __alignof__(struct latency_stat), gfp);
if (!iolat->stats) {
        kfree(iolat);
        return NULL;
}
return &iolat->pd;

/* from iolatency_pd_init(): */
struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
...
if (blk_queue_nonrot(blkg->q))
        iolat->ssd = true;
else
        iolat->ssd = false;
...
stat = per_cpu_ptr(iolat->stats, cpu);
...
latency_stat_init(iolat, &iolat->cur_stat);
rq_wait_init(&iolat->rq_wait);
spin_lock_init(&iolat->child_lat.lock);
iolat->rq_depth.queue_depth = blkg->q->nr_requests;
iolat->rq_depth.max_depth = UINT_MAX;
iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
iolat->blkiolat = blkiolat;
iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
atomic64_set(&iolat->window_start, now);
...
if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
        struct iolatency_grp *parent = blkg_to_lat(blkg->parent);

        atomic_set(&iolat->scale_cookie,
                   atomic_read(&parent->child_lat.scale_cookie));
} else {
        atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
}

atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);

/* from iolatency_pd_offline(): */
struct blk_iolatency *blkiolat = iolat->blkiolat;
...
if (ret == 1)
        atomic_inc(&blkiolat->enabled);
if (ret == -1)
        atomic_dec(&blkiolat->enabled);

/* from iolatency_pd_free(): */
free_percpu(iolat->stats);

/* from the io.latency cftype definition: */
.name = "latency",