Lines Matching refs:tg

71 	struct throtl_grp	*tg;		/* tg this qnode belongs to */  member
244 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) in tg_to_blkg() argument
246 return pd_to_blkg(&tg->pd); in tg_to_blkg()
273 struct throtl_grp *tg = sq_to_tg(sq); in sq_to_td() local
275 if (tg) in sq_to_td()
276 return tg->td; in sq_to_td()
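
The accessor fragments above (file lines 244-276) rely on the usual embedded-structure pattern: a throtl_grp embeds its blkg_policy_data and its throtl_service_queue, so the conversions are container_of() walks, and sq_to_td() climbs from a service queue either through the owning group or, for the root queue, directly to the throtl_data that embeds it. Below is a stripped-down, compilable model of that chain; the structures are reduced to the members needed for the walk and are not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct throtl_service_queue { struct throtl_service_queue *parent_sq; };
struct throtl_data { struct throtl_service_queue service_queue; };
struct throtl_grp {
	struct throtl_service_queue service_queue;	/* embedded queue */
	struct throtl_data *td;				/* owning throtl_data */
};

/* only non-root service queues are embedded in a throtl_grp */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	return NULL;
}

/* climb from a service queue to the top-level throtl_data */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	return container_of(sq, struct throtl_data, service_queue);
}

int main(void)
{
	struct throtl_data td = { .service_queue = { .parent_sq = NULL } };
	struct throtl_grp tg = {
		.service_queue = { .parent_sq = &td.service_queue },
		.td = &td,
	};

	printf("%d %d\n", sq_to_td(&tg.service_queue) == &td,
	       sq_to_td(&td.service_queue) == &td);	/* prints: 1 1 */
	return 0;
}

The parent_sq test in sq_to_tg() is what distinguishes the root queue (embedded in throtl_data) from the per-cgroup queues (embedded in a throtl_grp).
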
299 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw) in tg_bps_limit() argument
301 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_bps_limit()
308 td = tg->td; in tg_bps_limit()
309 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
313 tg->iops[rw][td->limit_index]) in tg_bps_limit()
319 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
320 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { in tg_bps_limit()
323 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
324 ret = min(tg->bps[rw][LIMIT_MAX], adjusted); in tg_bps_limit()
329 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) in tg_iops_limit() argument
331 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_iops_limit()
338 td = tg->td; in tg_iops_limit()
339 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
340 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
343 tg->bps[rw][td->limit_index]) in tg_iops_limit()
349 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
350 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) { in tg_iops_limit()
353 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
356 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted); in tg_iops_limit()
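
tg_bps_limit() and tg_iops_limit() (file lines 299-356) share one selection rule: return the limit for the current limit_index, except that a device running at LIMIT_MAX with a distinct LIMIT_LOW configured gets an upscaled low limit capped at the configured maximum. A standalone sketch of that rule follows; pick_bps_limit() and adjusted_limit() are illustrative stand-ins, and the root-cgroup and LIMIT_LOW fallback branches visible at lines 313 and 340-343 are not reproduced here.

#include <stdint.h>
#include <stdio.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

/* stand-in for throtl_adjusted_limit(): scale the low limit up; the real
 * scaling factor lives in throtl_data and is not part of this listing */
static uint64_t adjusted_limit(uint64_t low, unsigned int scale)
{
	return low + (low >> 1) * scale;
}

static uint64_t pick_bps_limit(const uint64_t bps[LIMIT_CNT], int limit_index,
			       unsigned int scale)
{
	uint64_t ret = bps[limit_index];

	/* running at LIMIT_MAX while a distinct low limit is configured:
	 * use the upscaled low limit, capped at the configured maximum */
	if (limit_index == LIMIT_MAX && bps[LIMIT_LOW] &&
	    bps[LIMIT_LOW] != bps[LIMIT_MAX]) {
		uint64_t adj = adjusted_limit(bps[LIMIT_LOW], scale);

		ret = adj < bps[LIMIT_MAX] ? adj : bps[LIMIT_MAX];
	}
	return ret;
}

int main(void)
{
	uint64_t bps[LIMIT_CNT] = {
		[LIMIT_LOW] = 10 << 20,		/* 10 MB/s low limit   */
		[LIMIT_MAX] = 100 << 20,	/* 100 MB/s max limit  */
	};

	/* upscaled low limit, still below the max: prints 20971520 */
	printf("%llu\n", (unsigned long long)pick_bps_limit(bps, LIMIT_MAX, 2));
	return 0;
}
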
396 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) in throtl_qnode_init() argument
400 qn->tg = tg; in throtl_qnode_init()
419 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
471 *tg_to_put = qn->tg; in throtl_pop_queued()
473 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
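
The qnode fragments (file lines 396-473) encode a reference-counting rule: a qnode records the tg it belongs to, queuing its first bio pins that group with blkg_get(), and popping the last bio either hands the pin to the caller through *tg_to_put or drops it with blkg_put() immediately, which lets the caller decide when it is safe to release the group. A toy, compilable model of just that rule, with the blkg reference reduced to a plain counter (an assumption for illustration, not the kernel API):

#include <stdio.h>

/* toy stand-ins: a "blkg reference" is just a counter on the group */
struct tg_model { int blkg_refs; };
struct qn_model { struct tg_model *tg; int nr_bios; int on_list; };

static void qnode_add_bio(struct qn_model *qn)
{
	qn->nr_bios++;
	if (!qn->on_list) {		/* first bio queues the qnode ... */
		qn->on_list = 1;
		qn->tg->blkg_refs++;	/* ... and pins it: blkg_get()    */
	}
}

static void qnode_pop_bio(struct qn_model *qn, struct tg_model **tg_to_put)
{
	qn->nr_bios--;
	if (qn->nr_bios == 0 && qn->on_list) {
		qn->on_list = 0;
		if (tg_to_put)
			*tg_to_put = qn->tg;	/* hand the pin to the caller */
		else
			qn->tg->blkg_refs--;	/* or drop it: blkg_put()     */
	}
}

int main(void)
{
	struct tg_model tg = { .blkg_refs = 1 };
	struct qn_model qn = { .tg = &tg };
	struct tg_model *put_me = NULL;

	qnode_add_bio(&qn);
	qnode_add_bio(&qn);
	qnode_pop_bio(&qn, NULL);
	qnode_pop_bio(&qn, &put_me);
	printf("%d %d\n", tg.blkg_refs, put_me == &tg);	/* prints: 2 1 */
	return 0;
}
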
494 struct throtl_grp *tg; in throtl_pd_alloc() local
497 tg = kzalloc_node(sizeof(*tg), gfp, q->node); in throtl_pd_alloc()
498 if (!tg) in throtl_pd_alloc()
501 if (blkg_rwstat_init(&tg->stat_bytes, gfp)) in throtl_pd_alloc()
504 if (blkg_rwstat_init(&tg->stat_ios, gfp)) in throtl_pd_alloc()
507 throtl_service_queue_init(&tg->service_queue); in throtl_pd_alloc()
510 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
511 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
514 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_alloc()
515 tg->bps[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
516 tg->bps[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
517 tg->iops[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
518 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
519 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
520 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
521 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
522 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
525 tg->latency_target = DFL_LATENCY_TARGET; in throtl_pd_alloc()
526 tg->latency_target_conf = DFL_LATENCY_TARGET; in throtl_pd_alloc()
527 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
528 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
530 return &tg->pd; in throtl_pd_alloc()
533 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_alloc()
535 kfree(tg); in throtl_pd_alloc()
541 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_init() local
542 struct blkcg_gq *blkg = tg_to_blkg(tg); in throtl_pd_init()
544 struct throtl_service_queue *sq = &tg->service_queue; in throtl_pd_init()
562 tg->td = td; in throtl_pd_init()
570 static void tg_update_has_rules(struct throtl_grp *tg) in tg_update_has_rules() argument
572 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
573 struct throtl_data *td = tg->td; in tg_update_has_rules()
577 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || in tg_update_has_rules()
579 (tg_bps_limit(tg, rw) != U64_MAX || in tg_update_has_rules()
580 tg_iops_limit(tg, rw) != UINT_MAX)); in tg_update_has_rules()
585 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_online() local
590 tg_update_has_rules(tg); in throtl_pd_online()
602 struct throtl_grp *tg = blkg_to_tg(blkg); in blk_throtl_update_limit_valid() local
604 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
605 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { in blk_throtl_update_limit_valid()
623 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_offline() local
625 tg->bps[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
626 tg->bps[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
627 tg->iops[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
628 tg->iops[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
630 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
632 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
633 throtl_upgrade_state(tg->td); in throtl_pd_offline()
638 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_free() local
640 del_timer_sync(&tg->service_queue.pending_timer); in throtl_pd_free()
641 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_free()
642 blkg_rwstat_exit(&tg->stat_ios); in throtl_pd_free()
643 kfree(tg); in throtl_pd_free()
668 struct throtl_grp *tg; in update_min_dispatch_time() local
670 tg = throtl_rb_first(parent_sq); in update_min_dispatch_time()
671 if (!tg) in update_min_dispatch_time()
674 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
677 static void tg_service_queue_add(struct throtl_grp *tg) in tg_service_queue_add() argument
679 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
683 unsigned long key = tg->disptime; in tg_service_queue_add()
698 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
699 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree, in tg_service_queue_add()
703 static void throtl_enqueue_tg(struct throtl_grp *tg) in throtl_enqueue_tg() argument
705 if (!(tg->flags & THROTL_TG_PENDING)) { in throtl_enqueue_tg()
706 tg_service_queue_add(tg); in throtl_enqueue_tg()
707 tg->flags |= THROTL_TG_PENDING; in throtl_enqueue_tg()
708 tg->service_queue.parent_sq->nr_pending++; in throtl_enqueue_tg()
712 static void throtl_dequeue_tg(struct throtl_grp *tg) in throtl_dequeue_tg() argument
714 if (tg->flags & THROTL_TG_PENDING) { in throtl_dequeue_tg()
715 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in throtl_dequeue_tg()
716 tg->flags &= ~THROTL_TG_PENDING; in throtl_dequeue_tg()
777 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, in throtl_start_new_slice_with_credit() argument
780 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
781 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
783 atomic_set(&tg->io_split_cnt[rw], 0); in throtl_start_new_slice_with_credit()
791 if (time_after_eq(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
792 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
794 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
795 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
797 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
798 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
801 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) in throtl_start_new_slice() argument
803 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
804 tg->io_disp[rw] = 0; in throtl_start_new_slice()
805 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
806 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
808 atomic_set(&tg->io_split_cnt[rw], 0); in throtl_start_new_slice()
810 throtl_log(&tg->service_queue, in throtl_start_new_slice()
812 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
813 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
816 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
819 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
822 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
825 throtl_set_slice_end(tg, rw, jiffy_end); in throtl_extend_slice()
826 throtl_log(&tg->service_queue, in throtl_extend_slice()
828 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
829 tg->slice_end[rw], jiffies); in throtl_extend_slice()
833 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
835 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
842 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
847 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
854 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
865 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
867 time_elapsed = jiffies - tg->slice_start[rw]; in throtl_trim_slice()
869 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
873 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
877 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
883 if (tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
884 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
886 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
888 if (tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
889 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
891 tg->io_disp[rw] = 0; in throtl_trim_slice()
893 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
895 throtl_log(&tg->service_queue, in throtl_trim_slice()
898 tg->slice_start[rw], tg->slice_end[rw], jiffies); in throtl_trim_slice()
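
throtl_trim_slice() (file lines 842-898) keeps an active group from banking unlimited credit: every whole throtl_slice that has elapsed is charged off against bytes_disp/io_disp at the configured rate, and slice_start is advanced by the trimmed time. The arithmetic in isolation, as a standalone example (HZ and the slice length are assumptions chosen for the demo):

#include <stdint.h>
#include <stdio.h>

#define HZ 1000U	/* assumed jiffies per second for the example */

struct slice {
	uint64_t bytes_disp;
	unsigned int io_disp;
	unsigned long slice_start;	/* in jiffies */
};

static void trim_slice(struct slice *s, unsigned long now,
		       unsigned long throtl_slice,
		       uint64_t bps_limit, unsigned int iops_limit)
{
	unsigned long nr_slices = (now - s->slice_start) / throtl_slice;
	uint64_t bytes_trim, io_trim;

	if (!nr_slices)
		return;

	bytes_trim = bps_limit * throtl_slice * nr_slices / HZ;
	io_trim = (uint64_t)iops_limit * throtl_slice * nr_slices / HZ;

	s->bytes_disp = s->bytes_disp > bytes_trim ? s->bytes_disp - bytes_trim : 0;
	s->io_disp = s->io_disp > io_trim ? s->io_disp - (unsigned int)io_trim : 0;
	s->slice_start += nr_slices * throtl_slice;	/* stay slice-aligned */
}

int main(void)
{
	/* 10 MB/s limit, 100ms slices, 250ms elapsed => two full slices trimmed */
	struct slice s = { .bytes_disp = 3 << 20, .io_disp = 700, .slice_start = 0 };

	trim_slice(&s, 250, 100, 10 << 20, 1000);
	printf("%llu %u %lu\n", (unsigned long long)s.bytes_disp,
	       s.io_disp, s.slice_start);	/* prints: 1048576 500 200 */
	return 0;
}

Only whole slices are trimmed, which keeps slice_start aligned to slice boundaries.
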
901 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_iops_limit() argument
915 jiffy_elapsed = jiffies - tg->slice_start[rw]; in tg_with_in_iops_limit()
918 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); in tg_with_in_iops_limit()
935 if (tg->io_disp[rw] + 1 <= io_allowed) { in tg_with_in_iops_limit()
949 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_bps_limit() argument
963 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_bps_limit()
967 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
969 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
975 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { in tg_with_in_bps_limit()
982 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; in tg_with_in_bps_limit()
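
tg_with_in_bps_limit() (file lines 949-982) answers whether a bio fits the byte budget accumulated so far in the slice, and if not, how long it has to wait: the budget is the bps limit scaled by the slice-rounded elapsed time, and the wait is the time the overshoot needs at that rate. A rough standalone version, with the kernel's jiffy rounding simplified to a plain ceiling division (an assumption for readability):

#include <stdint.h>
#include <stdio.h>

#define HZ 1000U	/* assumed jiffies per second for the example */

/* returns 1 if the bio fits the slice's byte budget, else 0 and *wait is
 * set to roughly how many jiffies the overshoot needs at the given rate */
static int within_bps_limit(uint64_t bytes_disp, uint64_t bio_size,
			    uint64_t bps_limit, unsigned long jiffy_elapsed_rnd,
			    unsigned long *wait)
{
	uint64_t bytes_allowed = bps_limit * jiffy_elapsed_rnd / HZ;
	uint64_t extra_bytes;

	if (bytes_disp + bio_size <= bytes_allowed) {
		*wait = 0;
		return 1;
	}

	/* not enough budget: wait long enough for the extra bytes to drain */
	extra_bytes = bytes_disp + bio_size - bytes_allowed;
	*wait = (unsigned long)((extra_bytes * HZ + bps_limit - 1) / bps_limit);
	return 0;
}

int main(void)
{
	unsigned long wait;
	/* 1 MB/s limit, 100ms into the slice, 64 KB already sent, 64 KB bio */
	int ok = within_bps_limit(64 << 10, 64 << 10, 1 << 20, 100, &wait);

	printf("%d %lu\n", ok, wait);	/* prints: 0 26 */
	return 0;
}

tg_with_in_iops_limit() applies the same idea to an IO count instead of bytes, and tg_may_dispatch() (the next group of fragments) takes the larger of the two waits and extends the slice to cover it.
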
1002 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, in tg_may_dispatch() argument
1007 u64 bps_limit = tg_bps_limit(tg, rw); in tg_may_dispatch()
1008 u32 iops_limit = tg_iops_limit(tg, rw); in tg_may_dispatch()
1016 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
1017 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
1033 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) in tg_may_dispatch()
1034 throtl_start_new_slice(tg, rw); in tg_may_dispatch()
1036 if (time_before(tg->slice_end[rw], in tg_may_dispatch()
1037 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
1038 throtl_extend_slice(tg, rw, in tg_may_dispatch()
1039 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1043 tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0); in tg_may_dispatch()
1045 if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) && in tg_may_dispatch()
1046 tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) { in tg_may_dispatch()
1057 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
1058 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
1063 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bio() argument
1069 tg->bytes_disp[rw] += bio_size; in throtl_charge_bio()
1070 tg->io_disp[rw]++; in throtl_charge_bio()
1071 tg->last_bytes_disp[rw] += bio_size; in throtl_charge_bio()
1072 tg->last_io_disp[rw]++; in throtl_charge_bio()
1094 struct throtl_grp *tg) in throtl_add_bio_tg() argument
1096 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
1100 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
1109 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
1114 throtl_enqueue_tg(tg); in throtl_add_bio_tg()
1117 static void tg_update_disptime(struct throtl_grp *tg) in tg_update_disptime() argument
1119 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
1125 tg_may_dispatch(tg, bio, &read_wait); in tg_update_disptime()
1129 tg_may_dispatch(tg, bio, &write_wait); in tg_update_disptime()
1135 throtl_dequeue_tg(tg); in tg_update_disptime()
1136 tg->disptime = disptime; in tg_update_disptime()
1137 throtl_enqueue_tg(tg); in tg_update_disptime()
1140 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
1153 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
1155 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
1170 throtl_charge_bio(tg, bio); in tg_dispatch_one_bio()
1180 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
1181 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
1183 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
1185 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1186 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1189 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
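
tg_dispatch_one_bio() (file lines 1153-1189) is where the cgroup hierarchy appears: the bio is charged against the current group, then either re-queued on the parent group (through this group's qnode_on_parent) or, at the top of the tree, moved to the td-level queue and removed from td->nr_queued. A small runnable model of that climb; the bio-list and qnode plumbing is elided and the struct below is invented for the example:

#include <stdio.h>

struct grp {
	struct grp *parent;
	unsigned long bytes_disp;	/* charged against this group's slice   */
	int nr_parent_queued;		/* bios handed up to the parent's queue */
};

/* model of tg_dispatch_one_bio(): charge the group, then push the bio one
 * level up, or count it as fully dispatched at the top of the tree */
static void dispatch_one_bio(struct grp *tg, unsigned long bio_size,
			     int *nr_throttled)
{
	tg->bytes_disp += bio_size;			/* throtl_charge_bio()       */

	if (tg->parent)
		tg->parent->nr_parent_queued++;		/* throtl_add_bio_tg(parent) */
	else
		(*nr_throttled)--;			/* td->nr_queued[rw]--       */
}

int main(void)
{
	struct grp root = { .parent = NULL };
	struct grp child = { .parent = &root };
	int nr_throttled = 1;

	dispatch_one_bio(&child, 4096, &nr_throttled);	/* climbs to root        */
	dispatch_one_bio(&root, 4096, &nr_throttled);	/* leaves throttling     */
	printf("%lu %lu %d %d\n", child.bytes_disp, root.bytes_disp,
	       root.nr_parent_queued, nr_throttled);	/* prints: 4096 4096 1 0 */
	return 0;
}

Because each level charges the bio as it passes through, a bio is debited against every ancestor's slice, which is how nested limits compose.
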
1195 static int throtl_dispatch_tg(struct throtl_grp *tg) in throtl_dispatch_tg() argument
1197 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
1206 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1208 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1216 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1218 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1233 struct throtl_grp *tg; in throtl_select_dispatch() local
1239 tg = throtl_rb_first(parent_sq); in throtl_select_dispatch()
1240 if (!tg) in throtl_select_dispatch()
1243 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
1246 throtl_dequeue_tg(tg); in throtl_select_dispatch()
1248 nr_disp += throtl_dispatch_tg(tg); in throtl_select_dispatch()
1250 sq = &tg->service_queue; in throtl_select_dispatch()
1252 tg_update_disptime(tg); in throtl_select_dispatch()
1281 struct throtl_grp *tg = sq_to_tg(sq); in throtl_pending_timer_fn() local
1321 if (tg->flags & THROTL_TG_WAS_EMPTY) { in throtl_pending_timer_fn()
1322 tg_update_disptime(tg); in throtl_pending_timer_fn()
1326 tg = sq_to_tg(sq); in throtl_pending_timer_fn()
1376 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_u64() local
1377 u64 v = *(u64 *)((void *)tg + off); in tg_prfill_conf_u64()
1387 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_uint() local
1388 unsigned int v = *(unsigned int *)((void *)tg + off); in tg_prfill_conf_uint()
1409 static void tg_conf_updated(struct throtl_grp *tg, bool global) in tg_conf_updated() argument
1411 struct throtl_service_queue *sq = &tg->service_queue; in tg_conf_updated()
1415 throtl_log(&tg->service_queue, in tg_conf_updated()
1417 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1418 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1428 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1456 throtl_start_new_slice(tg, READ); in tg_conf_updated()
1457 throtl_start_new_slice(tg, WRITE); in tg_conf_updated()
1459 if (tg->flags & THROTL_TG_PENDING) { in tg_conf_updated()
1460 tg_update_disptime(tg); in tg_conf_updated()
1470 struct throtl_grp *tg; in tg_set_conf() local
1484 tg = blkg_to_tg(ctx.blkg); in tg_set_conf()
1487 *(u64 *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1489 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1491 tg_conf_updated(tg, false); in tg_set_conf()
1587 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_limit() local
1606 if (tg->bps_conf[READ][off] == bps_dft && in tg_prfill_limit()
1607 tg->bps_conf[WRITE][off] == bps_dft && in tg_prfill_limit()
1608 tg->iops_conf[READ][off] == iops_dft && in tg_prfill_limit()
1609 tg->iops_conf[WRITE][off] == iops_dft && in tg_prfill_limit()
1611 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD && in tg_prfill_limit()
1612 tg->latency_target_conf == DFL_LATENCY_TARGET))) in tg_prfill_limit()
1615 if (tg->bps_conf[READ][off] != U64_MAX) in tg_prfill_limit()
1617 tg->bps_conf[READ][off]); in tg_prfill_limit()
1618 if (tg->bps_conf[WRITE][off] != U64_MAX) in tg_prfill_limit()
1620 tg->bps_conf[WRITE][off]); in tg_prfill_limit()
1621 if (tg->iops_conf[READ][off] != UINT_MAX) in tg_prfill_limit()
1623 tg->iops_conf[READ][off]); in tg_prfill_limit()
1624 if (tg->iops_conf[WRITE][off] != UINT_MAX) in tg_prfill_limit()
1626 tg->iops_conf[WRITE][off]); in tg_prfill_limit()
1628 if (tg->idletime_threshold_conf == ULONG_MAX) in tg_prfill_limit()
1632 tg->idletime_threshold_conf); in tg_prfill_limit()
1634 if (tg->latency_target_conf == ULONG_MAX) in tg_prfill_limit()
1638 " latency=%lu", tg->latency_target_conf); in tg_prfill_limit()
1659 struct throtl_grp *tg; in tg_set_limit() local
1670 tg = blkg_to_tg(ctx.blkg); in tg_set_limit()
1672 v[0] = tg->bps_conf[READ][index]; in tg_set_limit()
1673 v[1] = tg->bps_conf[WRITE][index]; in tg_set_limit()
1674 v[2] = tg->iops_conf[READ][index]; in tg_set_limit()
1675 v[3] = tg->iops_conf[WRITE][index]; in tg_set_limit()
1677 idle_time = tg->idletime_threshold_conf; in tg_set_limit()
1678 latency_time = tg->latency_target_conf; in tg_set_limit()
1718 tg->bps_conf[READ][index] = v[0]; in tg_set_limit()
1719 tg->bps_conf[WRITE][index] = v[1]; in tg_set_limit()
1720 tg->iops_conf[READ][index] = v[2]; in tg_set_limit()
1721 tg->iops_conf[WRITE][index] = v[3]; in tg_set_limit()
1724 tg->bps[READ][index] = v[0]; in tg_set_limit()
1725 tg->bps[WRITE][index] = v[1]; in tg_set_limit()
1726 tg->iops[READ][index] = v[2]; in tg_set_limit()
1727 tg->iops[WRITE][index] = v[3]; in tg_set_limit()
1729 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW], in tg_set_limit()
1730 tg->bps_conf[READ][LIMIT_MAX]); in tg_set_limit()
1731 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1732 tg->bps_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1733 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW], in tg_set_limit()
1734 tg->iops_conf[READ][LIMIT_MAX]); in tg_set_limit()
1735 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1736 tg->iops_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1737 tg->idletime_threshold_conf = idle_time; in tg_set_limit()
1738 tg->latency_target_conf = latency_time; in tg_set_limit()
1741 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || in tg_set_limit()
1742 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || in tg_set_limit()
1743 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD || in tg_set_limit()
1744 tg->latency_target_conf == DFL_LATENCY_TARGET) { in tg_set_limit()
1745 tg->bps[READ][LIMIT_LOW] = 0; in tg_set_limit()
1746 tg->bps[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1747 tg->iops[READ][LIMIT_LOW] = 0; in tg_set_limit()
1748 tg->iops[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1749 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in tg_set_limit()
1750 tg->latency_target = DFL_LATENCY_TARGET; in tg_set_limit()
1752 tg->idletime_threshold = tg->idletime_threshold_conf; in tg_set_limit()
1753 tg->latency_target = tg->latency_target_conf; in tg_set_limit()
1756 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1757 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1759 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1761 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1762 tg_conf_updated(tg, index == LIMIT_LOW && in tg_set_limit()
1763 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
1808 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg) in __tg_last_low_overflow_time() argument
1812 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]) in __tg_last_low_overflow_time()
1813 rtime = tg->last_low_overflow_time[READ]; in __tg_last_low_overflow_time()
1814 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in __tg_last_low_overflow_time()
1815 wtime = tg->last_low_overflow_time[WRITE]; in __tg_last_low_overflow_time()
1820 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg) in tg_last_low_overflow_time() argument
1823 struct throtl_grp *parent = tg; in tg_last_low_overflow_time()
1824 unsigned long ret = __tg_last_low_overflow_time(tg); in tg_last_low_overflow_time()
1847 static bool throtl_tg_is_idle(struct throtl_grp *tg) in throtl_tg_is_idle() argument
1859 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); in throtl_tg_is_idle()
1860 ret = tg->latency_target == DFL_LATENCY_TARGET || in throtl_tg_is_idle()
1861 tg->idletime_threshold == DFL_IDLE_THRESHOLD || in throtl_tg_is_idle()
1862 (ktime_get_ns() >> 10) - tg->last_finish_time > time || in throtl_tg_is_idle()
1863 tg->avg_idletime > tg->idletime_threshold || in throtl_tg_is_idle()
1864 (tg->latency_target && tg->bio_cnt && in throtl_tg_is_idle()
1865 tg->bad_bio_cnt * 5 < tg->bio_cnt); in throtl_tg_is_idle()
1866 throtl_log(&tg->service_queue, in throtl_tg_is_idle()
1868 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt, in throtl_tg_is_idle()
1869 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
1873 static bool throtl_tg_can_upgrade(struct throtl_grp *tg) in throtl_tg_can_upgrade() argument
1875 struct throtl_service_queue *sq = &tg->service_queue; in throtl_tg_can_upgrade()
1882 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]; in throtl_tg_can_upgrade()
1883 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]; in throtl_tg_can_upgrade()
1894 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1895 throtl_tg_is_idle(tg)) in throtl_tg_can_upgrade()
1900 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg) in throtl_hierarchy_can_upgrade() argument
1903 if (throtl_tg_can_upgrade(tg)) in throtl_hierarchy_can_upgrade()
1905 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_upgrade()
1906 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_upgrade()
1926 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_can_upgrade() local
1928 if (tg == this_tg) in throtl_can_upgrade()
1930 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_can_upgrade()
1932 if (!throtl_hierarchy_can_upgrade(tg)) { in throtl_can_upgrade()
1941 static void throtl_upgrade_check(struct throtl_grp *tg) in throtl_upgrade_check() argument
1945 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1948 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1951 tg->last_check_time = now; in throtl_upgrade_check()
1954 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1957 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1958 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1972 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_upgrade_state() local
1973 struct throtl_service_queue *sq = &tg->service_queue; in throtl_upgrade_state()
1975 tg->disptime = jiffies - 1; in throtl_upgrade_state()
1999 static bool throtl_tg_can_downgrade(struct throtl_grp *tg) in throtl_tg_can_downgrade() argument
2001 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade()
2009 time_after_eq(now, tg_last_low_overflow_time(tg) + in throtl_tg_can_downgrade()
2011 (!throtl_tg_is_idle(tg) || in throtl_tg_can_downgrade()
2012 !list_empty(&tg_to_blkg(tg)->blkcg->css.children))) in throtl_tg_can_downgrade()
2017 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg) in throtl_hierarchy_can_downgrade() argument
2020 if (!throtl_tg_can_downgrade(tg)) in throtl_hierarchy_can_downgrade()
2022 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_downgrade()
2023 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_downgrade()
2029 static void throtl_downgrade_check(struct throtl_grp *tg) in throtl_downgrade_check() argument
2036 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
2037 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
2039 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_downgrade_check()
2041 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
2044 elapsed_time = now - tg->last_check_time; in throtl_downgrade_check()
2045 tg->last_check_time = now; in throtl_downgrade_check()
2047 if (time_before(now, tg_last_low_overflow_time(tg) + in throtl_downgrade_check()
2048 tg->td->throtl_slice)) in throtl_downgrade_check()
2051 if (tg->bps[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2052 bps = tg->last_bytes_disp[READ] * HZ; in throtl_downgrade_check()
2054 if (bps >= tg->bps[READ][LIMIT_LOW]) in throtl_downgrade_check()
2055 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2058 if (tg->bps[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2059 bps = tg->last_bytes_disp[WRITE] * HZ; in throtl_downgrade_check()
2061 if (bps >= tg->bps[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2062 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2065 if (tg->iops[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2066 tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0); in throtl_downgrade_check()
2067 iops = tg->last_io_disp[READ] * HZ / elapsed_time; in throtl_downgrade_check()
2068 if (iops >= tg->iops[READ][LIMIT_LOW]) in throtl_downgrade_check()
2069 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2072 if (tg->iops[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2073 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0); in throtl_downgrade_check()
2074 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; in throtl_downgrade_check()
2075 if (iops >= tg->iops[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2076 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2083 if (throtl_hierarchy_can_downgrade(tg)) in throtl_downgrade_check()
2084 throtl_downgrade_state(tg->td); in throtl_downgrade_check()
2086 tg->last_bytes_disp[READ] = 0; in throtl_downgrade_check()
2087 tg->last_bytes_disp[WRITE] = 0; in throtl_downgrade_check()
2088 tg->last_io_disp[READ] = 0; in throtl_downgrade_check()
2089 tg->last_io_disp[WRITE] = 0; in throtl_downgrade_check()
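
throtl_downgrade_check() (file lines 2029-2089) converts the bytes and IOs dispatched since the last check into per-second rates and, while those rates still meet the LIMIT_LOW values, keeps refreshing last_low_overflow_time so the device stays at LIMIT_MAX. The rate test in isolation (standalone; HZ is an assumption for the example):

#include <stdint.h>
#include <stdio.h>

#define HZ 1000U	/* assumed jiffies per second for the example */

/* returns 1 when the observed rate still reaches the configured low limit,
 * i.e. the group is using its low limit and should not trigger a downgrade */
static int meets_low_bps(uint64_t last_bytes_disp, unsigned long elapsed_jiffies,
			 uint64_t bps_low)
{
	uint64_t bps = last_bytes_disp * HZ / elapsed_jiffies;

	return bps >= bps_low;
}

int main(void)
{
	/* 1.5 MB dispatched in 1 second against a 1 MB/s low limit */
	printf("%d\n", meets_low_bps(3 << 19, 1000, 1 << 20));		/* prints: 1 */
	/* 256 KB dispatched in 1 second against the same limit     */
	printf("%d\n", meets_low_bps(256 << 10, 1000, 1 << 20));	/* prints: 0 */
	return 0;
}

The last_* counters are zeroed at the end of the check (lines 2086-2089 above), so each invocation measures only the interval since the previous one.
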
2092 static void blk_throtl_update_idletime(struct throtl_grp *tg) in blk_throtl_update_idletime() argument
2095 unsigned long last_finish_time = tg->last_finish_time; in blk_throtl_update_idletime()
2102 last_finish_time == tg->checked_last_finish_time) in blk_throtl_update_idletime()
2105 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3; in blk_throtl_update_idletime()
2106 tg->checked_last_finish_time = last_finish_time; in blk_throtl_update_idletime()
2216 struct throtl_grp *tg = blkg_to_tg(blkg); in blk_throtl_bio() local
2220 struct throtl_data *td = tg->td; in blk_throtl_bio()
2229 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, in blk_throtl_bio()
2231 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); in blk_throtl_bio()
2234 if (!tg->has_rules[rw]) in blk_throtl_bio()
2241 blk_throtl_update_idletime(tg); in blk_throtl_bio()
2243 sq = &tg->service_queue; in blk_throtl_bio()
2247 if (tg->last_low_overflow_time[rw] == 0) in blk_throtl_bio()
2248 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2249 throtl_downgrade_check(tg); in blk_throtl_bio()
2250 throtl_upgrade_check(tg); in blk_throtl_bio()
2256 if (!tg_may_dispatch(tg, bio, NULL)) { in blk_throtl_bio()
2257 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2258 if (throtl_can_upgrade(td, tg)) { in blk_throtl_bio()
2266 throtl_charge_bio(tg, bio); in blk_throtl_bio()
2279 throtl_trim_slice(tg, rw); in blk_throtl_bio()
2286 qn = &tg->qnode_on_parent[rw]; in blk_throtl_bio()
2288 tg = sq_to_tg(sq); in blk_throtl_bio()
2289 if (!tg) in blk_throtl_bio()
2296 tg->bytes_disp[rw], bio->bi_iter.bi_size, in blk_throtl_bio()
2297 tg_bps_limit(tg, rw), in blk_throtl_bio()
2298 tg->io_disp[rw], tg_iops_limit(tg, rw), in blk_throtl_bio()
2301 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2304 throtl_add_bio_tg(bio, qn, tg); in blk_throtl_bio()
2313 if (tg->flags & THROTL_TG_WAS_EMPTY) { in blk_throtl_bio()
2314 tg_update_disptime(tg); in blk_throtl_bio()
2315 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in blk_throtl_bio()
2363 struct throtl_grp *tg; in blk_throtl_bio_endio() local
2373 tg = blkg_to_tg(blkg); in blk_throtl_bio_endio()
2374 if (!tg->td->limit_valid[LIMIT_LOW]) in blk_throtl_bio_endio()
2378 tg->last_finish_time = finish_time_ns >> 10; in blk_throtl_bio_endio()
2388 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2391 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2396 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2397 tg->latency_target; in blk_throtl_bio_endio()
2399 tg->bad_bio_cnt++; in blk_throtl_bio_endio()
2404 tg->bio_cnt++; in blk_throtl_bio_endio()
2407 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) { in blk_throtl_bio_endio()
2408 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
2409 tg->bio_cnt /= 2; in blk_throtl_bio_endio()
2410 tg->bad_bio_cnt /= 2; in blk_throtl_bio_endio()