Lines Matching refs: tg (identifier cross-reference over the Linux kernel's block throttling code, block/blk-throttle.c; each entry shows the source line number, the matching line, and the enclosing function)
71 struct throtl_grp *tg; /* tg this qnode belongs to */ member
241 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) in tg_to_blkg() argument
243 return pd_to_blkg(&tg->pd); in tg_to_blkg()
270 struct throtl_grp *tg = sq_to_tg(sq); in sq_to_td() local
272 if (tg) in sq_to_td()
273 return tg->td; in sq_to_td()
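The tg_to_blkg() and sq_to_tg()/sq_to_td() helpers above work because the policy data and service queue are embedded inside struct throtl_grp, so a member pointer can be converted back to the enclosing group. A minimal userspace sketch of that container_of pattern, with simplified stand-in types rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pd { int dummy; };

struct tg_sketch {
        int id;
        struct pd pd;           /* embedded, like tg->pd */
};

int main(void)
{
        struct tg_sketch tg = { .id = 42 };
        struct pd *p = &tg.pd;  /* what pd_to_tg()/tg_to_blkg() start from */
        struct tg_sketch *back = container_of(p, struct tg_sketch, pd);

        printf("recovered id = %d\n", back->id);        /* prints 42 */
        return 0;
}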
296 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw) in tg_bps_limit() argument
298 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_bps_limit()
305 td = tg->td; in tg_bps_limit()
306 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
310 tg->iops[rw][td->limit_index]) in tg_bps_limit()
316 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
317 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { in tg_bps_limit()
320 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
321 ret = min(tg->bps[rw][LIMIT_MAX], adjusted); in tg_bps_limit()
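A minimal sketch of the fallback logic visible in tg_bps_limit(): at LIMIT_LOW, a zero bps limit paired with a nonzero iops limit means "bytes unlimited at this level", and at LIMIT_MAX a configured low limit is scaled up but capped by the max. throtl_adjusted_limit() is replaced by a trivial stand-in; two limit levels and no cgroup hierarchy are assumed:

#include <stdint.h>

enum { LIMIT_LOW, LIMIT_MAX };

/* hypothetical stand-in: the kernel scales the low limit up over time */
static uint64_t adjusted_limit(uint64_t low) { return low; }

static uint64_t bps_limit_sketch(const uint64_t bps[2], uint64_t iops_low,
                                 int limit_index)
{
        uint64_t ret = bps[limit_index];

        /* zero bps at LIMIT_LOW with a nonzero iops low limit means
         * "bytes unlimited here" (intermediate-node case omitted) */
        if (ret == 0 && limit_index == LIMIT_LOW && iops_low)
                return UINT64_MAX;

        /* at LIMIT_MAX, the scaled-up low limit may not exceed the max */
        if (limit_index == LIMIT_MAX && bps[LIMIT_LOW] &&
            bps[LIMIT_LOW] != bps[LIMIT_MAX]) {
                uint64_t adj = adjusted_limit(bps[LIMIT_LOW]);

                ret = adj < bps[LIMIT_MAX] ? adj : bps[LIMIT_MAX];
        }
        return ret;
}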
326 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) in tg_iops_limit() argument
328 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_iops_limit()
335 td = tg->td; in tg_iops_limit()
336 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
337 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
340 tg->bps[rw][td->limit_index]) in tg_iops_limit()
346 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
347 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) { in tg_iops_limit()
350 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
353 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted); in tg_iops_limit()
393 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) in throtl_qnode_init() argument
397 qn->tg = tg; in throtl_qnode_init()
416 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
468 *tg_to_put = qn->tg; in throtl_pop_queued()
470 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
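The qnode lines above (throtl_qnode_add_bio() taking a blkg reference, throtl_pop_queued() dropping it or handing it out via tg_to_put) suggest a simple ownership rule: a qnode pins its group while it holds bios. A toy sketch of that rule with plain counters instead of blkcg refcounts:

struct group { int refcnt; };
struct qnode { struct group *grp; int nr_bios; };

static void grp_get(struct group *g) { g->refcnt++; }
static void grp_put(struct group *g) { g->refcnt--; }

/* the first bio queued on an empty qnode pins the group */
static void qnode_add_bio_sketch(struct qnode *qn)
{
        if (qn->nr_bios++ == 0)
                grp_get(qn->grp);
}

/* popping the last bio drops the pin; the kernel can instead hand it
 * to the caller through the tg_to_put out-parameter */
static void qnode_pop_bio_sketch(struct qnode *qn)
{
        if (--qn->nr_bios == 0)
                grp_put(qn->grp);
}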
491 struct throtl_grp *tg; in throtl_pd_alloc() local
494 tg = kzalloc_node(sizeof(*tg), gfp, q->node); in throtl_pd_alloc()
495 if (!tg) in throtl_pd_alloc()
498 if (blkg_rwstat_init(&tg->stat_bytes, gfp)) in throtl_pd_alloc()
501 if (blkg_rwstat_init(&tg->stat_ios, gfp)) in throtl_pd_alloc()
504 throtl_service_queue_init(&tg->service_queue); in throtl_pd_alloc()
507 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
508 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
511 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_alloc()
512 tg->bps[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
513 tg->bps[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
514 tg->iops[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
515 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
516 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
517 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
518 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
519 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
522 tg->latency_target = DFL_LATENCY_TARGET; in throtl_pd_alloc()
523 tg->latency_target_conf = DFL_LATENCY_TARGET; in throtl_pd_alloc()
524 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
525 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
527 return &tg->pd; in throtl_pd_alloc()
530 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_alloc()
532 kfree(tg); in throtl_pd_alloc()
538 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_init() local
539 struct blkcg_gq *blkg = tg_to_blkg(tg); in throtl_pd_init()
541 struct throtl_service_queue *sq = &tg->service_queue; in throtl_pd_init()
559 tg->td = td; in throtl_pd_init()
567 static void tg_update_has_rules(struct throtl_grp *tg) in tg_update_has_rules() argument
569 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
570 struct throtl_data *td = tg->td; in tg_update_has_rules()
574 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || in tg_update_has_rules()
576 (tg_bps_limit(tg, rw) != U64_MAX || in tg_update_has_rules()
577 tg_iops_limit(tg, rw) != UINT_MAX)); in tg_update_has_rules()
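The defaults set in throtl_pd_alloc() (U64_MAX/UINT_MAX meaning "no limit") combine with tg_update_has_rules() above: a group has rules in a direction if any ancestor does, or if its own effective limits differ from the unlimited sentinels. A compact sketch under those assumptions, with the hierarchy reduced to a parent pointer and any per-td validity gating omitted:

#include <stdbool.h>
#include <stdint.h>

struct grp {
        struct grp *parent;
        uint64_t bps_limit;     /* UINT64_MAX == unlimited, as in pd_alloc */
        uint32_t iops_limit;    /* UINT32_MAX == unlimited */
        bool has_rules;
};

static void update_has_rules_sketch(struct grp *g)
{
        g->has_rules = (g->parent && g->parent->has_rules) ||
                       g->bps_limit != UINT64_MAX ||
                       g->iops_limit != UINT32_MAX;
}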
582 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_online() local
587 tg_update_has_rules(tg); in throtl_pd_online()
598 struct throtl_grp *tg = blkg_to_tg(blkg); in blk_throtl_update_limit_valid() local
600 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
601 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { in blk_throtl_update_limit_valid()
614 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_offline() local
616 tg->bps[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
617 tg->bps[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
618 tg->iops[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
619 tg->iops[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
621 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
623 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
624 throtl_upgrade_state(tg->td); in throtl_pd_offline()
629 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_free() local
631 del_timer_sync(&tg->service_queue.pending_timer); in throtl_pd_free()
632 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_free()
633 blkg_rwstat_exit(&tg->stat_ios); in throtl_pd_free()
634 kfree(tg); in throtl_pd_free()
659 struct throtl_grp *tg; in update_min_dispatch_time() local
661 tg = throtl_rb_first(parent_sq); in update_min_dispatch_time()
662 if (!tg) in update_min_dispatch_time()
665 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
668 static void tg_service_queue_add(struct throtl_grp *tg) in tg_service_queue_add() argument
670 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
674 unsigned long key = tg->disptime; in tg_service_queue_add()
689 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
690 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree, in tg_service_queue_add()
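tg_service_queue_add() keeps pending groups in a tree keyed by disptime. A sketch of the same ordering walk using a plain unbalanced binary tree in place of the kernel's cached rbtree, with a wrap-safe jiffy comparison:

#include <stddef.h>

/* wrap-safe "a is before b" for jiffy-style counters */
static int time_before_sketch(unsigned long a, unsigned long b)
{
        return (long)(a - b) < 0;
}

struct node {
        unsigned long disptime;
        struct node *left, *right;
};

static void tree_add_sketch(struct node **root, struct node *n)
{
        while (*root) {
                if (time_before_sketch(n->disptime, (*root)->disptime))
                        root = &(*root)->left;
                else
                        root = &(*root)->right; /* ties go right, as in the walk */
        }
        n->left = n->right = NULL;
        *root = n;
}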
694 static void throtl_enqueue_tg(struct throtl_grp *tg) in throtl_enqueue_tg() argument
696 if (!(tg->flags & THROTL_TG_PENDING)) { in throtl_enqueue_tg()
697 tg_service_queue_add(tg); in throtl_enqueue_tg()
698 tg->flags |= THROTL_TG_PENDING; in throtl_enqueue_tg()
699 tg->service_queue.parent_sq->nr_pending++; in throtl_enqueue_tg()
703 static void throtl_dequeue_tg(struct throtl_grp *tg) in throtl_dequeue_tg() argument
705 if (tg->flags & THROTL_TG_PENDING) { in throtl_dequeue_tg()
706 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in throtl_dequeue_tg()
707 tg->flags &= ~THROTL_TG_PENDING; in throtl_dequeue_tg()
768 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, in throtl_start_new_slice_with_credit() argument
771 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
772 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
780 if (time_after_eq(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
781 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
783 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
784 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
786 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
787 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
790 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) in throtl_start_new_slice() argument
792 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
793 tg->io_disp[rw] = 0; in throtl_start_new_slice()
794 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
795 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
796 throtl_log(&tg->service_queue, in throtl_start_new_slice()
798 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
799 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
802 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
805 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
808 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
811 throtl_set_slice_end(tg, rw, jiffy_end); in throtl_extend_slice()
812 throtl_log(&tg->service_queue, in throtl_extend_slice()
814 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
815 tg->slice_end[rw], jiffies); in throtl_extend_slice()
819 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
821 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
828 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
833 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
840 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
851 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
853 time_elapsed = jiffies - tg->slice_start[rw]; in throtl_trim_slice()
855 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
859 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
863 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
869 if (tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
870 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
872 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
874 if (tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
875 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
877 tg->io_disp[rw] = 0; in throtl_trim_slice()
879 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
881 throtl_log(&tg->service_queue, in throtl_trim_slice()
884 tg->slice_start[rw], tg->slice_end[rw], jiffies); in throtl_trim_slice()
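The trim arithmetic in throtl_trim_slice() (lines 853-879 above) discounts the budget of every whole slice that has already elapsed, so a group cannot bank unused history. A worked userspace sketch with illustrative HZ and slice values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned long hz = 1000;          /* ticks per second (assumed) */
        const unsigned long throtl_slice = 100; /* slice length in ticks */
        uint64_t bps_limit = 1048576;           /* 1 MiB/s */
        unsigned long elapsed = 250;            /* ticks since slice_start */
        uint64_t bytes_disp = 300000;

        unsigned long nr_slices = elapsed / throtl_slice;       /* 2 */
        uint64_t bytes_trim = bps_limit * throtl_slice * nr_slices / hz;

        /* never underflow: a busy group is simply reset to zero credit */
        bytes_disp = bytes_disp >= bytes_trim ? bytes_disp - bytes_trim : 0;

        printf("trim=%llu remaining=%llu\n",
               (unsigned long long)bytes_trim,
               (unsigned long long)bytes_disp); /* trim=209715 remaining=90285 */
        return 0;
}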
887 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_iops_limit() argument
901 jiffy_elapsed = jiffies - tg->slice_start[rw]; in tg_with_in_iops_limit()
904 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); in tg_with_in_iops_limit()
921 if (tg->io_disp[rw] + 1 <= io_allowed) { in tg_with_in_iops_limit()
935 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_bps_limit() argument
949 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_bps_limit()
953 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
955 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
961 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { in tg_with_in_bps_limit()
968 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; in tg_with_in_bps_limit()
988 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, in tg_may_dispatch() argument
993 u64 bps_limit = tg_bps_limit(tg, rw); in tg_may_dispatch()
994 u32 iops_limit = tg_iops_limit(tg, rw); in tg_may_dispatch()
1002 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
1003 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
1019 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) in tg_may_dispatch()
1020 throtl_start_new_slice(tg, rw); in tg_may_dispatch()
1022 if (time_before(tg->slice_end[rw], in tg_may_dispatch()
1023 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
1024 throtl_extend_slice(tg, rw, in tg_may_dispatch()
1025 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1028 if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) && in tg_may_dispatch()
1029 tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) { in tg_may_dispatch()
1040 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
1041 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
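tg_may_dispatch() combines the two budget checks: a bio goes only if it fits both the bps and iops allowances, and otherwise the caller waits for the larger shortfall. A sketch of that core, folding in the tg_with_in_bps_limit() byte-allowance math; a nonzero bps limit is assumed, and the iops side is passed in as a precomputed wait:

#include <stdbool.h>
#include <stdint.h>

static bool may_dispatch_sketch(uint64_t bps_limit, uint64_t bytes_disp,
                                uint64_t bio_size, unsigned long elapsed_rnd,
                                unsigned long hz, unsigned long iops_wait,
                                unsigned long *wait)
{
        uint64_t bytes_allowed = bps_limit * elapsed_rnd / hz;
        unsigned long bps_wait = 0;

        if (bytes_disp + bio_size > bytes_allowed) {
                uint64_t extra = bytes_disp + bio_size - bytes_allowed;

                bps_wait = extra * hz / bps_limit; /* ticks to accrue extra */
                if (!bps_wait)
                        bps_wait = 1;              /* at least one tick */
        }
        if (!bps_wait && !iops_wait) {
                if (wait)
                        *wait = 0;
                return true;    /* fits both budgets */
        }
        if (wait)       /* sleep for the larger shortfall */
                *wait = bps_wait > iops_wait ? bps_wait : iops_wait;
        return false;
}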
1046 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bio() argument
1052 tg->bytes_disp[rw] += bio_size; in throtl_charge_bio()
1053 tg->io_disp[rw]++; in throtl_charge_bio()
1054 tg->last_bytes_disp[rw] += bio_size; in throtl_charge_bio()
1055 tg->last_io_disp[rw]++; in throtl_charge_bio()
1077 struct throtl_grp *tg) in throtl_add_bio_tg() argument
1079 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
1083 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
1092 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
1097 throtl_enqueue_tg(tg); in throtl_add_bio_tg()
1100 static void tg_update_disptime(struct throtl_grp *tg) in tg_update_disptime() argument
1102 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
1108 tg_may_dispatch(tg, bio, &read_wait); in tg_update_disptime()
1112 tg_may_dispatch(tg, bio, &write_wait); in tg_update_disptime()
1118 throtl_dequeue_tg(tg); in tg_update_disptime()
1119 tg->disptime = disptime; in tg_update_disptime()
1120 throtl_enqueue_tg(tg); in tg_update_disptime()
1123 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
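tg_update_disptime() asks when each direction could next dispatch and keys the group on the sooner of the two; the dequeue/set/enqueue sequence keeps the pending tree sorted. The timing rule in isolation:

static unsigned long next_disptime_sketch(unsigned long now,
                                          unsigned long read_wait,
                                          unsigned long write_wait)
{
        unsigned long min_wait = read_wait < write_wait ? read_wait : write_wait;

        return now + min_wait;  /* whichever direction can go first */
}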
1136 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
1138 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
1153 throtl_charge_bio(tg, bio); in tg_dispatch_one_bio()
1163 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
1164 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
1166 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
1168 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1169 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1172 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
1178 static int throtl_dispatch_tg(struct throtl_grp *tg) in throtl_dispatch_tg() argument
1180 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
1189 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1191 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1199 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1201 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1216 struct throtl_grp *tg; in throtl_select_dispatch() local
1222 tg = throtl_rb_first(parent_sq); in throtl_select_dispatch()
1223 if (!tg) in throtl_select_dispatch()
1226 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
1229 throtl_dequeue_tg(tg); in throtl_select_dispatch()
1231 nr_disp += throtl_dispatch_tg(tg); in throtl_select_dispatch()
1233 sq = &tg->service_queue; in throtl_select_dispatch()
1235 tg_update_disptime(tg); in throtl_select_dispatch()
1264 struct throtl_grp *tg = sq_to_tg(sq); in throtl_pending_timer_fn() local
1304 if (tg->flags & THROTL_TG_WAS_EMPTY) { in throtl_pending_timer_fn()
1305 tg_update_disptime(tg); in throtl_pending_timer_fn()
1309 tg = sq_to_tg(sq); in throtl_pending_timer_fn()
1359 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_u64() local
1360 u64 v = *(u64 *)((void *)tg + off); in tg_prfill_conf_u64()
1370 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_uint() local
1371 unsigned int v = *(unsigned int *)((void *)tg + off); in tg_prfill_conf_uint()
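tg_prfill_conf_u64()/tg_prfill_conf_uint() and tg_set_conf() read and write fields through a byte offset stored with the cftype, as the pointer arithmetic on lines 1360, 1371, 1470 and 1472 shows. A self-contained sketch of that offsetof idiom:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct grp_sketch { uint64_t bps_read; uint64_t bps_write; };

int main(void)
{
        struct grp_sketch g = { 100, 200 };
        size_t off = offsetof(struct grp_sketch, bps_write);

        /* read path, as in tg_prfill_conf_u64() */
        uint64_t v = *(uint64_t *)((char *)&g + off);
        printf("read via offset: %llu\n", (unsigned long long)v);

        /* write path, as in tg_set_conf() */
        *(uint64_t *)((char *)&g + off) = 300;
        printf("after write: %llu\n", (unsigned long long)g.bps_write);
        return 0;
}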
1392 static void tg_conf_updated(struct throtl_grp *tg, bool global) in tg_conf_updated() argument
1394 struct throtl_service_queue *sq = &tg->service_queue; in tg_conf_updated()
1398 throtl_log(&tg->service_queue, in tg_conf_updated()
1400 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1401 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1411 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1439 throtl_start_new_slice(tg, READ); in tg_conf_updated()
1440 throtl_start_new_slice(tg, WRITE); in tg_conf_updated()
1442 if (tg->flags & THROTL_TG_PENDING) { in tg_conf_updated()
1443 tg_update_disptime(tg); in tg_conf_updated()
1453 struct throtl_grp *tg; in tg_set_conf() local
1467 tg = blkg_to_tg(ctx.blkg); in tg_set_conf()
1470 *(u64 *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1472 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1474 tg_conf_updated(tg, false); in tg_set_conf()
1570 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_limit() local
1589 if (tg->bps_conf[READ][off] == bps_dft && in tg_prfill_limit()
1590 tg->bps_conf[WRITE][off] == bps_dft && in tg_prfill_limit()
1591 tg->iops_conf[READ][off] == iops_dft && in tg_prfill_limit()
1592 tg->iops_conf[WRITE][off] == iops_dft && in tg_prfill_limit()
1594 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD && in tg_prfill_limit()
1595 tg->latency_target_conf == DFL_LATENCY_TARGET))) in tg_prfill_limit()
1598 if (tg->bps_conf[READ][off] != U64_MAX) in tg_prfill_limit()
1600 tg->bps_conf[READ][off]); in tg_prfill_limit()
1601 if (tg->bps_conf[WRITE][off] != U64_MAX) in tg_prfill_limit()
1603 tg->bps_conf[WRITE][off]); in tg_prfill_limit()
1604 if (tg->iops_conf[READ][off] != UINT_MAX) in tg_prfill_limit()
1606 tg->iops_conf[READ][off]); in tg_prfill_limit()
1607 if (tg->iops_conf[WRITE][off] != UINT_MAX) in tg_prfill_limit()
1609 tg->iops_conf[WRITE][off]); in tg_prfill_limit()
1611 if (tg->idletime_threshold_conf == ULONG_MAX) in tg_prfill_limit()
1615 tg->idletime_threshold_conf); in tg_prfill_limit()
1617 if (tg->latency_target_conf == ULONG_MAX) in tg_prfill_limit()
1621 " latency=%lu", tg->latency_target_conf); in tg_prfill_limit()
1642 struct throtl_grp *tg; in tg_set_limit() local
1653 tg = blkg_to_tg(ctx.blkg); in tg_set_limit()
1655 v[0] = tg->bps_conf[READ][index]; in tg_set_limit()
1656 v[1] = tg->bps_conf[WRITE][index]; in tg_set_limit()
1657 v[2] = tg->iops_conf[READ][index]; in tg_set_limit()
1658 v[3] = tg->iops_conf[WRITE][index]; in tg_set_limit()
1660 idle_time = tg->idletime_threshold_conf; in tg_set_limit()
1661 latency_time = tg->latency_target_conf; in tg_set_limit()
1701 tg->bps_conf[READ][index] = v[0]; in tg_set_limit()
1702 tg->bps_conf[WRITE][index] = v[1]; in tg_set_limit()
1703 tg->iops_conf[READ][index] = v[2]; in tg_set_limit()
1704 tg->iops_conf[WRITE][index] = v[3]; in tg_set_limit()
1707 tg->bps[READ][index] = v[0]; in tg_set_limit()
1708 tg->bps[WRITE][index] = v[1]; in tg_set_limit()
1709 tg->iops[READ][index] = v[2]; in tg_set_limit()
1710 tg->iops[WRITE][index] = v[3]; in tg_set_limit()
1712 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW], in tg_set_limit()
1713 tg->bps_conf[READ][LIMIT_MAX]); in tg_set_limit()
1714 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1715 tg->bps_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1716 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW], in tg_set_limit()
1717 tg->iops_conf[READ][LIMIT_MAX]); in tg_set_limit()
1718 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1719 tg->iops_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1720 tg->idletime_threshold_conf = idle_time; in tg_set_limit()
1721 tg->latency_target_conf = latency_time; in tg_set_limit()
1724 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || in tg_set_limit()
1725 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || in tg_set_limit()
1726 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD || in tg_set_limit()
1727 tg->latency_target_conf == DFL_LATENCY_TARGET) { in tg_set_limit()
1728 tg->bps[READ][LIMIT_LOW] = 0; in tg_set_limit()
1729 tg->bps[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1730 tg->iops[READ][LIMIT_LOW] = 0; in tg_set_limit()
1731 tg->iops[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1732 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in tg_set_limit()
1733 tg->latency_target = DFL_LATENCY_TARGET; in tg_set_limit()
1735 tg->idletime_threshold = tg->idletime_threshold_conf; in tg_set_limit()
1736 tg->latency_target = tg->latency_target_conf; in tg_set_limit()
1739 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1740 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1742 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1744 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1745 tg_conf_updated(tg, index == LIMIT_LOW && in tg_set_limit()
1746 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
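At the end of tg_set_limit(), the effective LIMIT_LOW values are clamped so they never exceed the configured LIMIT_MAX, per the min()/min_t() calls on lines 1712-1719. The clamp in isolation:

#include <stdint.h>

/* per direction: tg->bps[rw][LIMIT_LOW] = min(low_conf, max_conf) */
static uint64_t effective_low_sketch(uint64_t low_conf, uint64_t max_conf)
{
        return low_conf < max_conf ? low_conf : max_conf;
}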
1791 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg) in __tg_last_low_overflow_time() argument
1795 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]) in __tg_last_low_overflow_time()
1796 rtime = tg->last_low_overflow_time[READ]; in __tg_last_low_overflow_time()
1797 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in __tg_last_low_overflow_time()
1798 wtime = tg->last_low_overflow_time[WRITE]; in __tg_last_low_overflow_time()
1803 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg) in tg_last_low_overflow_time() argument
1806 struct throtl_grp *parent = tg; in tg_last_low_overflow_time()
1807 unsigned long ret = __tg_last_low_overflow_time(tg); in tg_last_low_overflow_time()
1830 static bool throtl_tg_is_idle(struct throtl_grp *tg) in throtl_tg_is_idle() argument
1842 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); in throtl_tg_is_idle()
1843 ret = tg->latency_target == DFL_LATENCY_TARGET || in throtl_tg_is_idle()
1844 tg->idletime_threshold == DFL_IDLE_THRESHOLD || in throtl_tg_is_idle()
1845 (ktime_get_ns() >> 10) - tg->last_finish_time > time || in throtl_tg_is_idle()
1846 tg->avg_idletime > tg->idletime_threshold || in throtl_tg_is_idle()
1847 (tg->latency_target && tg->bio_cnt && in throtl_tg_is_idle()
1848 tg->bad_bio_cnt * 5 < tg->bio_cnt); in throtl_tg_is_idle()
1849 throtl_log(&tg->service_queue, in throtl_tg_is_idle()
1851 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt, in throtl_tg_is_idle()
1852 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
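throtl_tg_is_idle() treats a group as idle if it opted out of the low-limit machinery (default latency or idle targets), has been quiet for roughly four idle-threshold windows, its averaged idle time exceeds the threshold, or under 20% of its bios missed the latency target (bad_bio_cnt * 5 < bio_cnt). A sketch of that predicate with simplified types; the dfl_* parameters stand in for the kernel's DFL_* defaults:

#include <stdbool.h>
#include <stdint.h>

static bool tg_is_idle_sketch(uint64_t latency_target, uint64_t dfl_latency,
                              uint64_t idle_thresh, uint64_t dfl_idle,
                              uint64_t since_last_finish,
                              uint64_t avg_idletime,
                              uint64_t bio_cnt, uint64_t bad_bio_cnt)
{
        /* the kernel additionally caps this window at MAX_IDLE_TIME */
        uint64_t window = 4 * idle_thresh;

        return latency_target == dfl_latency ||        /* target unset */
               idle_thresh == dfl_idle ||              /* threshold unset */
               since_last_finish > window ||           /* long quiet gap */
               avg_idletime > idle_thresh ||           /* EWMA says idle */
               (latency_target && bio_cnt &&
                bad_bio_cnt * 5 < bio_cnt);            /* <20% missed target */
}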
1856 static bool throtl_tg_can_upgrade(struct throtl_grp *tg) in throtl_tg_can_upgrade() argument
1858 struct throtl_service_queue *sq = &tg->service_queue; in throtl_tg_can_upgrade()
1865 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]; in throtl_tg_can_upgrade()
1866 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]; in throtl_tg_can_upgrade()
1877 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1878 throtl_tg_is_idle(tg)) in throtl_tg_can_upgrade()
1883 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg) in throtl_hierarchy_can_upgrade() argument
1886 if (throtl_tg_can_upgrade(tg)) in throtl_hierarchy_can_upgrade()
1888 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_upgrade()
1889 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_upgrade()
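throtl_hierarchy_can_upgrade() walks from the group toward the root and succeeds if any level can upgrade; throtl_hierarchy_can_downgrade() (further down) uses the same walk but requires every level to pass. A sketch of the "any ancestor" variant:

#include <stdbool.h>

struct grp { struct grp *parent; bool can; };

/* upgrade variant: true if any level passes; the downgrade walk
 * inverts this and bails out on the first level that fails */
static bool hierarchy_can_sketch(struct grp *g)
{
        for (; g; g = g->parent)
                if (g->can)
                        return true;
        return false;
}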
1909 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_can_upgrade() local
1911 if (tg == this_tg) in throtl_can_upgrade()
1913 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_can_upgrade()
1915 if (!throtl_hierarchy_can_upgrade(tg)) { in throtl_can_upgrade()
1924 static void throtl_upgrade_check(struct throtl_grp *tg) in throtl_upgrade_check() argument
1928 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1931 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1934 tg->last_check_time = now; in throtl_upgrade_check()
1937 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1940 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1941 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1955 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_upgrade_state() local
1956 struct throtl_service_queue *sq = &tg->service_queue; in throtl_upgrade_state()
1958 tg->disptime = jiffies - 1; in throtl_upgrade_state()
1982 static bool throtl_tg_can_downgrade(struct throtl_grp *tg) in throtl_tg_can_downgrade() argument
1984 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade()
1992 time_after_eq(now, tg_last_low_overflow_time(tg) + in throtl_tg_can_downgrade()
1994 (!throtl_tg_is_idle(tg) || in throtl_tg_can_downgrade()
1995 !list_empty(&tg_to_blkg(tg)->blkcg->css.children))) in throtl_tg_can_downgrade()
2000 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg) in throtl_hierarchy_can_downgrade() argument
2003 if (!throtl_tg_can_downgrade(tg)) in throtl_hierarchy_can_downgrade()
2005 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_downgrade()
2006 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_downgrade()
2012 static void throtl_downgrade_check(struct throtl_grp *tg) in throtl_downgrade_check() argument
2019 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
2020 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
2022 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_downgrade_check()
2024 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
2027 elapsed_time = now - tg->last_check_time; in throtl_downgrade_check()
2028 tg->last_check_time = now; in throtl_downgrade_check()
2030 if (time_before(now, tg_last_low_overflow_time(tg) + in throtl_downgrade_check()
2031 tg->td->throtl_slice)) in throtl_downgrade_check()
2034 if (tg->bps[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2035 bps = tg->last_bytes_disp[READ] * HZ; in throtl_downgrade_check()
2037 if (bps >= tg->bps[READ][LIMIT_LOW]) in throtl_downgrade_check()
2038 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2041 if (tg->bps[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2042 bps = tg->last_bytes_disp[WRITE] * HZ; in throtl_downgrade_check()
2044 if (bps >= tg->bps[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2045 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2048 if (tg->iops[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2049 iops = tg->last_io_disp[READ] * HZ / elapsed_time; in throtl_downgrade_check()
2050 if (iops >= tg->iops[READ][LIMIT_LOW]) in throtl_downgrade_check()
2051 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2054 if (tg->iops[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2055 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; in throtl_downgrade_check()
2056 if (iops >= tg->iops[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2057 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2064 if (throtl_hierarchy_can_downgrade(tg)) in throtl_downgrade_check()
2065 throtl_downgrade_state(tg->td); in throtl_downgrade_check()
2067 tg->last_bytes_disp[READ] = 0; in throtl_downgrade_check()
2068 tg->last_bytes_disp[WRITE] = 0; in throtl_downgrade_check()
2069 tg->last_io_disp[READ] = 0; in throtl_downgrade_check()
2070 tg->last_io_disp[WRITE] = 0; in throtl_downgrade_check()
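throtl_downgrade_check() converts the bytes and ios dispatched since the last check into per-second rates; meeting the LIMIT_LOW target refreshes last_low_overflow_time, which in turn blocks a downgrade. A worked sketch of the rate math with illustrative numbers (the kernel performs the 64-bit division with do_div):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned long hz = 1000;
        unsigned long elapsed = 500;    /* ticks since last_check_time */
        uint64_t last_bytes = 600000;   /* last_bytes_disp[rw] */
        uint64_t low_bps = 1000000;     /* bps[rw][LIMIT_LOW] */

        uint64_t bps = last_bytes * hz / elapsed;       /* 1200000 B/s */

        printf("observed %llu B/s vs low %llu B/s -> %s\n",
               (unsigned long long)bps, (unsigned long long)low_bps,
               bps >= low_bps ? "refresh overflow time (no downgrade)"
                              : "candidate for downgrade");
        return 0;
}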
2073 static void blk_throtl_update_idletime(struct throtl_grp *tg) in blk_throtl_update_idletime() argument
2076 unsigned long last_finish_time = tg->last_finish_time; in blk_throtl_update_idletime()
2083 last_finish_time == tg->checked_last_finish_time) in blk_throtl_update_idletime()
2086 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3; in blk_throtl_update_idletime()
2087 tg->checked_last_finish_time = last_finish_time; in blk_throtl_update_idletime()
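Line 2086 above is a classic shift-based EWMA: the new average is seven parts old average, one part new sample, where the sample is the gap since the last bio completion. Worked with round numbers:

#include <stdio.h>

int main(void)
{
        unsigned long avg = 1000;       /* avg_idletime, us */
        unsigned long sample = 200;     /* now - last_finish_time, us */

        avg = (avg * 7 + sample) >> 3;  /* (7000 + 200) / 8 = 900 */
        printf("new avg_idletime = %lu us\n", avg);
        return 0;
}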
2178 struct throtl_grp *tg = blkg_to_tg(blkg); in blk_throtl_bio() local
2182 struct throtl_data *td = tg->td; in blk_throtl_bio()
2191 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, in blk_throtl_bio()
2193 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); in blk_throtl_bio()
2196 if (!tg->has_rules[rw]) in blk_throtl_bio()
2203 blk_throtl_update_idletime(tg); in blk_throtl_bio()
2205 sq = &tg->service_queue; in blk_throtl_bio()
2209 if (tg->last_low_overflow_time[rw] == 0) in blk_throtl_bio()
2210 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2211 throtl_downgrade_check(tg); in blk_throtl_bio()
2212 throtl_upgrade_check(tg); in blk_throtl_bio()
2218 if (!tg_may_dispatch(tg, bio, NULL)) { in blk_throtl_bio()
2219 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2220 if (throtl_can_upgrade(td, tg)) { in blk_throtl_bio()
2228 throtl_charge_bio(tg, bio); in blk_throtl_bio()
2241 throtl_trim_slice(tg, rw); in blk_throtl_bio()
2248 qn = &tg->qnode_on_parent[rw]; in blk_throtl_bio()
2250 tg = sq_to_tg(sq); in blk_throtl_bio()
2251 if (!tg) in blk_throtl_bio()
2258 tg->bytes_disp[rw], bio->bi_iter.bi_size, in blk_throtl_bio()
2259 tg_bps_limit(tg, rw), in blk_throtl_bio()
2260 tg->io_disp[rw], tg_iops_limit(tg, rw), in blk_throtl_bio()
2263 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2266 throtl_add_bio_tg(bio, qn, tg); in blk_throtl_bio()
2275 if (tg->flags & THROTL_TG_WAS_EMPTY) { in blk_throtl_bio()
2276 tg_update_disptime(tg); in blk_throtl_bio()
2277 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in blk_throtl_bio()
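blk_throtl_bio() is the submission hook: walking up from the bio's group, each level either charges the bio against the current slice and continues upward, or queues it and lets the pending timer retry at disptime. A one-level sketch of that charge-or-queue decision with toy numbers:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the tg_may_dispatch() budget test */
static bool may_dispatch(unsigned long disp, unsigned long allowed,
                         unsigned long size)
{
        return disp + size <= allowed;
}

int main(void)
{
        unsigned long bytes_disp = 90000, bytes_allowed = 100000;
        unsigned long bio_size = 16384;

        if (may_dispatch(bytes_disp, bytes_allowed, bio_size)) {
                bytes_disp += bio_size;         /* throtl_charge_bio() */
                puts("dispatched");
        } else {
                puts("queued; pending timer retries at disptime");
        }
        return 0;
}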
2325 struct throtl_grp *tg; in blk_throtl_bio_endio() local
2335 tg = blkg_to_tg(blkg); in blk_throtl_bio_endio()
2336 if (!tg->td->limit_valid[LIMIT_LOW]) in blk_throtl_bio_endio()
2340 tg->last_finish_time = finish_time_ns >> 10; in blk_throtl_bio_endio()
2350 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2353 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2358 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2359 tg->latency_target; in blk_throtl_bio_endio()
2361 tg->bad_bio_cnt++; in blk_throtl_bio_endio()
2366 tg->bio_cnt++; in blk_throtl_bio_endio()
2369 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) { in blk_throtl_bio_endio()
2370 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
2371 tg->bio_cnt /= 2; in blk_throtl_bio_endio()
2372 tg->bad_bio_cnt /= 2; in blk_throtl_bio_endio()
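The tail of blk_throtl_bio_endio() halves both counters once per throtl_slice window (or when bio_cnt passes 1024), so the bad-bio ratio consulted by throtl_tg_is_idle() reflects recent completions rather than all history. The decay step, with a wrap-safe time comparison:

#include <stdbool.h>

/* wrap-safe time_after() */
static bool time_after_sketch(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

static void maybe_decay_sketch(unsigned long now, unsigned long throtl_slice,
                               unsigned long *reset_time,
                               unsigned long *bio_cnt,
                               unsigned long *bad_bio_cnt)
{
        if (time_after_sketch(now, *reset_time) || *bio_cnt > 1024) {
                *reset_time = now + throtl_slice;
                *bio_cnt /= 2;          /* keep the ratio, forget old history */
                *bad_bio_cnt /= 2;
        }
}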