Lines matching refs: tg

Cross-reference hits for the identifier tg (a struct throtl_grp pointer) in the Linux kernel's block I/O throttling code (blk-throttle). Each entry gives the source line number, the matching line, and the symbol kind or enclosing function.

70 	struct throtl_grp	*tg;		/* tg this qnode belongs to */  member
238 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) in tg_to_blkg() argument
240 return pd_to_blkg(&tg->pd); in tg_to_blkg()
267 struct throtl_grp *tg = sq_to_tg(sq); in sq_to_td() local
269 if (tg) in sq_to_td()
270 return tg->td; in sq_to_td()
293 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw) in tg_bps_limit() argument
295 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_bps_limit()
302 td = tg->td; in tg_bps_limit()
303 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
307 tg->iops[rw][td->limit_index]) in tg_bps_limit()
313 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
314 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { in tg_bps_limit()
317 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
318 ret = min(tg->bps[rw][LIMIT_MAX], adjusted); in tg_bps_limit()
323 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) in tg_iops_limit() argument
325 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_iops_limit()
332 td = tg->td; in tg_iops_limit()
333 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
334 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
337 tg->bps[rw][td->limit_index]) in tg_iops_limit()
343 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
344 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) { in tg_iops_limit()
347 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
350 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted); in tg_iops_limit()
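
The two clusters above, tg_bps_limit() and tg_iops_limit(), pick the effective limit for the current limit_index. Two details are visible in the fragments: at LIMIT_LOW a zero value can fall back to "unlimited" when the other dimension still has a low limit, and at LIMIT_MAX a configured low limit is scaled up (throtl_adjusted_limit()) and capped by the max. A minimal userspace sketch of the LIMIT_MAX fallback for bps (all names are illustrative, and the scale-up is modeled as a fixed factor, not the kernel's logic):

    #include <stdint.h>
    #include <stdio.h>

    enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

    struct group {
        uint64_t bps[LIMIT_CNT];   /* bps[LIMIT_LOW] == 0 means "not set" */
    };

    /* Stand-in for throtl_adjusted_limit(); the kernel scales the low
     * limit up over time, here simply a fixed 1.5x. */
    static uint64_t adjusted_limit(uint64_t low)
    {
        return low + low / 2;
    }

    static uint64_t effective_bps(const struct group *g, int limit_index)
    {
        uint64_t ret = g->bps[limit_index];

        /* At LIMIT_MAX with a distinct low limit configured, start from
         * the scaled-up low limit but never exceed the max. */
        if (limit_index == LIMIT_MAX && g->bps[LIMIT_LOW] &&
            g->bps[LIMIT_LOW] != g->bps[LIMIT_MAX]) {
            uint64_t adj = adjusted_limit(g->bps[LIMIT_LOW]);
            ret = adj < g->bps[LIMIT_MAX] ? adj : g->bps[LIMIT_MAX];
        }
        return ret;
    }

    int main(void)
    {
        struct group g = { .bps = { [LIMIT_LOW] = 1 << 20,
                                    [LIMIT_MAX] = 8 << 20 } };
        printf("%llu\n", (unsigned long long)effective_bps(&g, LIMIT_MAX));
        return 0;   /* prints 1572864: 1.5x the low limit, under the max */
    }

The iops variant at lines 323-350 follows the same shape with unsigned int and UINT_MAX in place of u64 and U64_MAX.
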
390 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) in throtl_qnode_init() argument
394 qn->tg = tg; in throtl_qnode_init()
413 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
463 *tg_to_put = qn->tg; in throtl_pop_queued()
465 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
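
throtl_qnode_init()/throtl_qnode_add_bio()/throtl_pop_queued() above implement per-group bio queues where a queued qnode pins its blkcg group: blkg_get() when the node goes onto a list, blkg_put() (or a reference handoff via tg_to_put) when it is drained. A simplified model of that ownership pattern; here the reference is tied to the node being non-empty, which only approximates the on-list condition in the real code:

    #include <assert.h>
    #include <stdio.h>

    struct grp { int refcnt; };

    static void grp_get(struct grp *g) { g->refcnt++; }
    static void grp_put(struct grp *g) { assert(g->refcnt > 0); g->refcnt--; }

    struct qnode {
        struct grp *owner;   /* group this qnode belongs to */
        int nr_bios;         /* stand-in for the qnode's bio list */
    };

    static void qnode_add_bio(struct qnode *qn)
    {
        if (qn->nr_bios++ == 0)   /* first bio queues the node: pin owner */
            grp_get(qn->owner);
    }

    static void qnode_pop_bio(struct qnode *qn)
    {
        assert(qn->nr_bios > 0);
        if (--qn->nr_bios == 0)   /* node drained: drop the pinning ref */
            grp_put(qn->owner);
    }

    int main(void)
    {
        struct grp g = { .refcnt = 1 };
        struct qnode qn = { .owner = &g, .nr_bios = 0 };
        qnode_add_bio(&qn);
        qnode_add_bio(&qn);
        qnode_pop_bio(&qn);
        qnode_pop_bio(&qn);
        printf("refcnt=%d\n", g.refcnt);   /* back to 1 */
        return 0;
    }
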
484 struct throtl_grp *tg; in throtl_pd_alloc() local
487 tg = kzalloc_node(sizeof(*tg), gfp, node); in throtl_pd_alloc()
488 if (!tg) in throtl_pd_alloc()
491 throtl_service_queue_init(&tg->service_queue); in throtl_pd_alloc()
494 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
495 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
498 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_alloc()
499 tg->bps[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
500 tg->bps[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
501 tg->iops[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
502 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
503 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
504 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
505 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
506 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
509 tg->latency_target = DFL_LATENCY_TARGET; in throtl_pd_alloc()
510 tg->latency_target_conf = DFL_LATENCY_TARGET; in throtl_pd_alloc()
511 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
512 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
514 return &tg->pd; in throtl_pd_alloc()
519 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_init() local
520 struct blkcg_gq *blkg = tg_to_blkg(tg); in throtl_pd_init()
522 struct throtl_service_queue *sq = &tg->service_queue; in throtl_pd_init()
540 tg->td = td; in throtl_pd_init()
548 static void tg_update_has_rules(struct throtl_grp *tg) in tg_update_has_rules() argument
550 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
551 struct throtl_data *td = tg->td; in tg_update_has_rules()
555 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || in tg_update_has_rules()
557 (tg_bps_limit(tg, rw) != U64_MAX || in tg_update_has_rules()
558 tg_iops_limit(tg, rw) != UINT_MAX)); in tg_update_has_rules()
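
tg_update_has_rules() above computes, per direction, whether throttling applies at all: true if the parent already has rules or this group's own bps/iops limit is finite. Sketch of that propagation (the limit_valid check from the fragment is omitted; names are illustrative):

    #include <limits.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct group {
        const struct group *parent;
        uint64_t bps;          /* UINT64_MAX == unlimited */
        unsigned int iops;     /* UINT_MAX == unlimited */
        bool has_rules;
    };

    static void update_has_rules(struct group *g)
    {
        g->has_rules = (g->parent && g->parent->has_rules) ||
                       g->bps != UINT64_MAX || g->iops != UINT_MAX;
    }

    int main(void)
    {
        struct group parent = { NULL, 1 << 20, UINT_MAX, false };
        struct group child  = { &parent, UINT64_MAX, UINT_MAX, false };
        update_has_rules(&parent);
        update_has_rules(&child);   /* inherits the parent's bps rule */
        printf("parent=%d child=%d\n", parent.has_rules, child.has_rules);
        return 0;
    }

This is why throtl_pd_alloc() above seeds every LIMIT_MAX slot with U64_MAX/UINT_MAX: the sentinel doubles as "no rule here".
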
563 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_online() local
568 tg_update_has_rules(tg); in throtl_pd_online()
579 struct throtl_grp *tg = blkg_to_tg(blkg); in blk_throtl_update_limit_valid() local
581 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
582 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { in blk_throtl_update_limit_valid()
595 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_offline() local
597 tg->bps[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
598 tg->bps[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
599 tg->iops[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
600 tg->iops[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
602 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
604 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
605 throtl_upgrade_state(tg->td); in throtl_pd_offline()
610 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_free() local
612 del_timer_sync(&tg->service_queue.pending_timer); in throtl_pd_free()
613 kfree(tg); in throtl_pd_free()
649 struct throtl_grp *tg; in update_min_dispatch_time() local
651 tg = throtl_rb_first(parent_sq); in update_min_dispatch_time()
652 if (!tg) in update_min_dispatch_time()
655 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
658 static void tg_service_queue_add(struct throtl_grp *tg) in tg_service_queue_add() argument
660 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
664 unsigned long key = tg->disptime; in tg_service_queue_add()
680 parent_sq->first_pending = &tg->rb_node; in tg_service_queue_add()
682 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
683 rb_insert_color(&tg->rb_node, &parent_sq->pending_tree); in tg_service_queue_add()
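
tg_service_queue_add() keeps pending children ordered by disptime, caching the leftmost entry in first_pending so the earliest dispatch time can be read directly (see update_min_dispatch_time() above). A sorted-list stand-in for the same ordering invariant (the kernel uses an rbtree; this is only a model):

    #include <stdio.h>

    struct tgrp {
        unsigned long disptime;     /* earliest time this group may dispatch */
        struct tgrp *next;
    };

    struct service_queue {
        struct tgrp *first_pending; /* head == smallest disptime */
    };

    /* Insert preserving ascending disptime, mirroring the time_before()
     * walk in tg_service_queue_add(). */
    static void sq_add(struct service_queue *sq, struct tgrp *tg)
    {
        struct tgrp **pp = &sq->first_pending;
        while (*pp && (*pp)->disptime <= tg->disptime)
            pp = &(*pp)->next;
        tg->next = *pp;
        *pp = tg;
    }

    int main(void)
    {
        struct service_queue sq = { 0 };
        struct tgrp a = { 30, 0 }, b = { 10, 0 }, c = { 20, 0 };
        sq_add(&sq, &a); sq_add(&sq, &b); sq_add(&sq, &c);
        for (struct tgrp *t = sq.first_pending; t; t = t->next)
            printf("%lu ", t->disptime);   /* 10 20 30 */
        printf("\n");
        return 0;
    }
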
686 static void __throtl_enqueue_tg(struct throtl_grp *tg) in __throtl_enqueue_tg() argument
688 tg_service_queue_add(tg); in __throtl_enqueue_tg()
689 tg->flags |= THROTL_TG_PENDING; in __throtl_enqueue_tg()
690 tg->service_queue.parent_sq->nr_pending++; in __throtl_enqueue_tg()
693 static void throtl_enqueue_tg(struct throtl_grp *tg) in throtl_enqueue_tg() argument
695 if (!(tg->flags & THROTL_TG_PENDING)) in throtl_enqueue_tg()
696 __throtl_enqueue_tg(tg); in throtl_enqueue_tg()
699 static void __throtl_dequeue_tg(struct throtl_grp *tg) in __throtl_dequeue_tg() argument
701 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in __throtl_dequeue_tg()
702 tg->flags &= ~THROTL_TG_PENDING; in __throtl_dequeue_tg()
705 static void throtl_dequeue_tg(struct throtl_grp *tg) in throtl_dequeue_tg() argument
707 if (tg->flags & THROTL_TG_PENDING) in throtl_dequeue_tg()
708 __throtl_dequeue_tg(tg); in throtl_dequeue_tg()
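
The enqueue/dequeue wrappers above make queueing idempotent with the THROTL_TG_PENDING flag: a group enters the pending tree at most once, keeping nr_pending consistent. Minimal model of that guard:

    #include <stdio.h>

    #define TG_PENDING 0x1u

    struct group { unsigned int flags; };

    static void enqueue(struct group *g, int *nr_pending)
    {
        if (g->flags & TG_PENDING)
            return;                 /* already queued: no-op */
        g->flags |= TG_PENDING;
        (*nr_pending)++;
    }

    static void dequeue(struct group *g, int *nr_pending)
    {
        if (!(g->flags & TG_PENDING))
            return;
        g->flags &= ~TG_PENDING;
        (*nr_pending)--;
    }

    int main(void)
    {
        struct group g = { 0 };
        int nr = 0;
        enqueue(&g, &nr);
        enqueue(&g, &nr);                /* second call is absorbed */
        printf("nr_pending=%d\n", nr);   /* 1 */
        return 0;
    }
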
768 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, in throtl_start_new_slice_with_credit() argument
771 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
772 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
780 if (time_after_eq(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
781 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
783 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
784 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
786 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
787 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
790 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) in throtl_start_new_slice() argument
792 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
793 tg->io_disp[rw] = 0; in throtl_start_new_slice()
794 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
795 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
796 throtl_log(&tg->service_queue, in throtl_start_new_slice()
798 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
799 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
802 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
805 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
808 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
811 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_extend_slice()
812 throtl_log(&tg->service_queue, in throtl_extend_slice()
814 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
815 tg->slice_end[rw], jiffies); in throtl_extend_slice()
819 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
821 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
828 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
833 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
840 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
851 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
853 time_elapsed = jiffies - tg->slice_start[rw]; in throtl_trim_slice()
855 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
859 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
863 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
869 if (tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
870 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
872 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
874 if (tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
875 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
877 tg->io_disp[rw] = 0; in throtl_trim_slice()
879 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
881 throtl_log(&tg->service_queue, in throtl_trim_slice()
884 tg->slice_start[rw], tg->slice_end[rw], jiffies); in throtl_trim_slice()
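
The slice fragments above (start/extend/trim) implement budget accounting over fixed windows: each (group, direction) pair has a slice [slice_start, slice_end) plus per-slice byte and io counters, and throtl_trim_slice() slides the window forward, forgiving the bytes and ios that the elapsed whole slices would have allowed. A standalone sketch of the trim arithmetic, using milliseconds instead of jiffies/HZ (illustrative names, byte budget only):

    #include <stdint.h>
    #include <stdio.h>

    struct slice {
        uint64_t start_ms;
        uint64_t bytes_disp;   /* bytes dispatched in the current slice */
    };

    /* Bytes one whole slice allows at rate bps with length slice_ms. */
    static uint64_t bytes_per_slice(uint64_t bps, uint64_t slice_ms)
    {
        return bps * slice_ms / 1000;
    }

    static void trim_slice(struct slice *s, uint64_t now_ms,
                           uint64_t bps, uint64_t slice_ms)
    {
        uint64_t nr_slices = (now_ms - s->start_ms) / slice_ms;
        if (!nr_slices)
            return;                  /* nothing fully elapsed yet */
        uint64_t bytes_trim = bytes_per_slice(bps, slice_ms) * nr_slices;

        s->bytes_disp = s->bytes_disp > bytes_trim
                      ? s->bytes_disp - bytes_trim : 0;
        s->start_ms += nr_slices * slice_ms;   /* slide the window */
    }

    int main(void)
    {
        struct slice s = { .start_ms = 0, .bytes_disp = 3000 };
        trim_slice(&s, 250, 10000 /* B/s */, 100 /* ms */);
        /* two whole slices elapsed -> 2 * 1000 bytes forgiven */
        printf("start=%llu disp=%llu\n",
               (unsigned long long)s.start_ms,
               (unsigned long long)s.bytes_disp);   /* start=200 disp=1000 */
        return 0;
    }
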
887 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_iops_limit() argument
895 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_iops_limit()
899 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_iops_limit()
901 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_iops_limit()
910 tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd; in tg_with_in_iops_limit()
918 if (tg->io_disp[rw] + 1 <= io_allowed) { in tg_with_in_iops_limit()
932 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_bps_limit() argument
940 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_bps_limit()
944 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
946 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
948 tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd; in tg_with_in_bps_limit()
952 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { in tg_with_in_bps_limit()
959 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; in tg_with_in_bps_limit()
960 jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw)); in tg_with_in_bps_limit()
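
tg_with_in_bps_limit() above decides whether a bio fits the byte budget earned since slice_start and, if not, how long it must wait: extra_bytes * HZ / bps, i.e. the time the configured rate needs to earn the missing bytes. Millisecond-based sketch of the same calculation (names illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if the bio fits the budget; otherwise sets *wait_ms. */
    static bool within_bps_limit(uint64_t bytes_disp, uint64_t bio_size,
                                 uint64_t bytes_allowed, uint64_t bps,
                                 uint64_t *wait_ms)
    {
        if (bytes_disp + bio_size <= bytes_allowed) {
            if (wait_ms)
                *wait_ms = 0;
            return true;
        }
        uint64_t extra = bytes_disp + bio_size - bytes_allowed;
        /* round up so we never wake before the budget actually exists */
        *wait_ms = (extra * 1000 + bps - 1) / bps;
        return false;
    }

    int main(void)
    {
        uint64_t wait;
        bool ok = within_bps_limit(900, 300, 1000, 10000, &wait);
        printf("ok=%d wait=%llums\n", ok, (unsigned long long)wait);
        return 0;   /* ok=0 wait=20ms: 200 extra bytes at 10000 B/s */
    }

tg_may_dispatch(), next, combines this with the analogous iops check and waits for max(bps_wait, iops_wait), extending the slice to cover the wait.
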
979 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, in tg_may_dispatch() argument
991 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
992 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
995 if (tg_bps_limit(tg, rw) == U64_MAX && in tg_may_dispatch()
996 tg_iops_limit(tg, rw) == UINT_MAX) { in tg_may_dispatch()
1009 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) in tg_may_dispatch()
1010 throtl_start_new_slice(tg, rw); in tg_may_dispatch()
1012 if (time_before(tg->slice_end[rw], in tg_may_dispatch()
1013 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
1014 throtl_extend_slice(tg, rw, in tg_may_dispatch()
1015 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1018 if (tg_with_in_bps_limit(tg, bio, &bps_wait) && in tg_may_dispatch()
1019 tg_with_in_iops_limit(tg, bio, &iops_wait)) { in tg_may_dispatch()
1030 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
1031 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
1036 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bio() argument
1042 tg->bytes_disp[rw] += bio_size; in throtl_charge_bio()
1043 tg->io_disp[rw]++; in throtl_charge_bio()
1044 tg->last_bytes_disp[rw] += bio_size; in throtl_charge_bio()
1045 tg->last_io_disp[rw]++; in throtl_charge_bio()
1067 struct throtl_grp *tg) in throtl_add_bio_tg() argument
1069 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
1073 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
1082 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
1087 throtl_enqueue_tg(tg); in throtl_add_bio_tg()
1090 static void tg_update_disptime(struct throtl_grp *tg) in tg_update_disptime() argument
1092 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
1098 tg_may_dispatch(tg, bio, &read_wait); in tg_update_disptime()
1102 tg_may_dispatch(tg, bio, &write_wait); in tg_update_disptime()
1108 throtl_dequeue_tg(tg); in tg_update_disptime()
1109 tg->disptime = disptime; in tg_update_disptime()
1110 throtl_enqueue_tg(tg); in tg_update_disptime()
1113 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
1126 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
1128 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
1143 throtl_charge_bio(tg, bio); in tg_dispatch_one_bio()
1153 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
1154 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
1156 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
1158 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1159 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1162 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
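
tg_dispatch_one_bio() above moves a bio one level up the hierarchy per dispatch: the bio is charged to this group, then queued on the parent via qnode_on_parent (or placed on the topmost service queue for issuing). Each level therefore throttles the bio in turn. A compact model of that per-level charging climb (illustrative; the real code also manages slices and td->nr_queued):

    #include <stddef.h>
    #include <stdio.h>

    struct group {
        const char *name;
        struct group *parent;
        unsigned long bytes_disp;
    };

    /* Charge the bio at this level and hand it to the parent; returns
     * the next owner, or NULL once the bio leaves the hierarchy. */
    static struct group *dispatch_one(struct group *tg, unsigned long size)
    {
        tg->bytes_disp += size;
        printf("charged %lu bytes to %s\n", size, tg->name);
        return tg->parent;
    }

    int main(void)
    {
        struct group root  = { "root", NULL, 0 };
        struct group child = { "child", &root, 0 };
        for (struct group *g = &child; g; )
            g = dispatch_one(g, 4096);
        return 0;
    }
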
1168 static int throtl_dispatch_tg(struct throtl_grp *tg) in throtl_dispatch_tg() argument
1170 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
1179 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1181 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1189 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1191 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1206 struct throtl_grp *tg = throtl_rb_first(parent_sq); in throtl_select_dispatch() local
1209 if (!tg) in throtl_select_dispatch()
1212 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
1215 throtl_dequeue_tg(tg); in throtl_select_dispatch()
1217 nr_disp += throtl_dispatch_tg(tg); in throtl_select_dispatch()
1219 sq = &tg->service_queue; in throtl_select_dispatch()
1221 tg_update_disptime(tg); in throtl_select_dispatch()
1250 struct throtl_grp *tg = sq_to_tg(sq); in throtl_pending_timer_fn() local
1290 if (tg->flags & THROTL_TG_WAS_EMPTY) { in throtl_pending_timer_fn()
1291 tg_update_disptime(tg); in throtl_pending_timer_fn()
1295 tg = sq_to_tg(sq); in throtl_pending_timer_fn()
1345 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_u64() local
1346 u64 v = *(u64 *)((void *)tg + off); in tg_prfill_conf_u64()
1356 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_uint() local
1357 unsigned int v = *(unsigned int *)((void *)tg + off); in tg_prfill_conf_uint()
1378 static void tg_conf_updated(struct throtl_grp *tg, bool global) in tg_conf_updated() argument
1380 struct throtl_service_queue *sq = &tg->service_queue; in tg_conf_updated()
1384 throtl_log(&tg->service_queue, in tg_conf_updated()
1386 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1387 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1397 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1425 throtl_start_new_slice(tg, 0); in tg_conf_updated()
1426 throtl_start_new_slice(tg, 1); in tg_conf_updated()
1428 if (tg->flags & THROTL_TG_PENDING) { in tg_conf_updated()
1429 tg_update_disptime(tg); in tg_conf_updated()
1439 struct throtl_grp *tg; in tg_set_conf() local
1453 tg = blkg_to_tg(ctx.blkg); in tg_set_conf()
1456 *(u64 *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1458 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1460 tg_conf_updated(tg, false); in tg_set_conf()
1530 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_limit() local
1549 if (tg->bps_conf[READ][off] == bps_dft && in tg_prfill_limit()
1550 tg->bps_conf[WRITE][off] == bps_dft && in tg_prfill_limit()
1551 tg->iops_conf[READ][off] == iops_dft && in tg_prfill_limit()
1552 tg->iops_conf[WRITE][off] == iops_dft && in tg_prfill_limit()
1554 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD && in tg_prfill_limit()
1555 tg->latency_target_conf == DFL_LATENCY_TARGET))) in tg_prfill_limit()
1558 if (tg->bps_conf[READ][off] != U64_MAX) in tg_prfill_limit()
1560 tg->bps_conf[READ][off]); in tg_prfill_limit()
1561 if (tg->bps_conf[WRITE][off] != U64_MAX) in tg_prfill_limit()
1563 tg->bps_conf[WRITE][off]); in tg_prfill_limit()
1564 if (tg->iops_conf[READ][off] != UINT_MAX) in tg_prfill_limit()
1566 tg->iops_conf[READ][off]); in tg_prfill_limit()
1567 if (tg->iops_conf[WRITE][off] != UINT_MAX) in tg_prfill_limit()
1569 tg->iops_conf[WRITE][off]); in tg_prfill_limit()
1571 if (tg->idletime_threshold_conf == ULONG_MAX) in tg_prfill_limit()
1575 tg->idletime_threshold_conf); in tg_prfill_limit()
1577 if (tg->latency_target_conf == ULONG_MAX) in tg_prfill_limit()
1581 " latency=%lu", tg->latency_target_conf); in tg_prfill_limit()
1602 struct throtl_grp *tg; in tg_set_limit() local
1613 tg = blkg_to_tg(ctx.blkg); in tg_set_limit()
1615 v[0] = tg->bps_conf[READ][index]; in tg_set_limit()
1616 v[1] = tg->bps_conf[WRITE][index]; in tg_set_limit()
1617 v[2] = tg->iops_conf[READ][index]; in tg_set_limit()
1618 v[3] = tg->iops_conf[WRITE][index]; in tg_set_limit()
1620 idle_time = tg->idletime_threshold_conf; in tg_set_limit()
1621 latency_time = tg->latency_target_conf; in tg_set_limit()
1661 tg->bps_conf[READ][index] = v[0]; in tg_set_limit()
1662 tg->bps_conf[WRITE][index] = v[1]; in tg_set_limit()
1663 tg->iops_conf[READ][index] = v[2]; in tg_set_limit()
1664 tg->iops_conf[WRITE][index] = v[3]; in tg_set_limit()
1667 tg->bps[READ][index] = v[0]; in tg_set_limit()
1668 tg->bps[WRITE][index] = v[1]; in tg_set_limit()
1669 tg->iops[READ][index] = v[2]; in tg_set_limit()
1670 tg->iops[WRITE][index] = v[3]; in tg_set_limit()
1672 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW], in tg_set_limit()
1673 tg->bps_conf[READ][LIMIT_MAX]); in tg_set_limit()
1674 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1675 tg->bps_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1676 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW], in tg_set_limit()
1677 tg->iops_conf[READ][LIMIT_MAX]); in tg_set_limit()
1678 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1679 tg->iops_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1680 tg->idletime_threshold_conf = idle_time; in tg_set_limit()
1681 tg->latency_target_conf = latency_time; in tg_set_limit()
1684 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || in tg_set_limit()
1685 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || in tg_set_limit()
1686 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD || in tg_set_limit()
1687 tg->latency_target_conf == DFL_LATENCY_TARGET) { in tg_set_limit()
1688 tg->bps[READ][LIMIT_LOW] = 0; in tg_set_limit()
1689 tg->bps[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1690 tg->iops[READ][LIMIT_LOW] = 0; in tg_set_limit()
1691 tg->iops[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1692 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in tg_set_limit()
1693 tg->latency_target = DFL_LATENCY_TARGET; in tg_set_limit()
1695 tg->idletime_threshold = tg->idletime_threshold_conf; in tg_set_limit()
1696 tg->latency_target = tg->latency_target_conf; in tg_set_limit()
1699 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1700 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1702 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1704 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1705 tg_conf_updated(tg, index == LIMIT_LOW && in tg_set_limit()
1706 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
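
The tg_set_limit() assignments above derive the effective limits from the configured ones: each LIMIT_LOW value is clamped so it can never exceed the corresponding LIMIT_MAX, and the low limit is switched off entirely when no low value is set or the idle/latency thresholds are still at their defaults. The clamp itself is just:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    int main(void)
    {
        uint64_t bps_conf_low = 20 << 20;   /* requested 20 MiB/s low... */
        uint64_t bps_conf_max = 10 << 20;   /* ...but max is 10 MiB/s */
        uint64_t bps_low = min_u64(bps_conf_low, bps_conf_max);
        printf("effective low = %llu\n", (unsigned long long)bps_low);
        return 0;   /* 10485760: the low limit is capped at the max */
    }
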
1751 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg) in __tg_last_low_overflow_time() argument
1755 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]) in __tg_last_low_overflow_time()
1756 rtime = tg->last_low_overflow_time[READ]; in __tg_last_low_overflow_time()
1757 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in __tg_last_low_overflow_time()
1758 wtime = tg->last_low_overflow_time[WRITE]; in __tg_last_low_overflow_time()
1763 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg) in tg_last_low_overflow_time() argument
1766 struct throtl_grp *parent = tg; in tg_last_low_overflow_time()
1767 unsigned long ret = __tg_last_low_overflow_time(tg); in tg_last_low_overflow_time()
1790 static bool throtl_tg_is_idle(struct throtl_grp *tg) in throtl_tg_is_idle() argument
1802 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); in throtl_tg_is_idle()
1803 ret = tg->latency_target == DFL_LATENCY_TARGET || in throtl_tg_is_idle()
1804 tg->idletime_threshold == DFL_IDLE_THRESHOLD || in throtl_tg_is_idle()
1805 (ktime_get_ns() >> 10) - tg->last_finish_time > time || in throtl_tg_is_idle()
1806 tg->avg_idletime > tg->idletime_threshold || in throtl_tg_is_idle()
1807 (tg->latency_target && tg->bio_cnt && in throtl_tg_is_idle()
1808 tg->bad_bio_cnt * 5 < tg->bio_cnt); in throtl_tg_is_idle()
1809 throtl_log(&tg->service_queue, in throtl_tg_is_idle()
1811 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt, in throtl_tg_is_idle()
1812 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
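
throtl_tg_is_idle() above declares a group idle when any of several conditions holds: the latency target or idle threshold left at its default, a long gap since the last completion, a high average idle time, or, with a latency target set, fewer than one in five bios exceeding the target (bad_bio_cnt * 5 < bio_cnt). Sketch of that last ratio test:

    #include <stdbool.h>
    #include <stdio.h>

    /* True if under 20% of tracked bios missed their latency target. */
    static bool latency_ok(unsigned long bad_bio_cnt, unsigned long bio_cnt)
    {
        return bio_cnt && bad_bio_cnt * 5 < bio_cnt;
    }

    int main(void)
    {
        printf("%d %d\n", latency_ok(1, 10), latency_ok(3, 10));  /* 1 0 */
        return 0;
    }

The bio_cnt/bad_bio_cnt counters themselves are aged in blk_throtl_bio_endio() further down: both are halved once per throtl_slice (or when bio_cnt passes 1024), so the ratio tracks recent behavior.
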
1816 static bool throtl_tg_can_upgrade(struct throtl_grp *tg) in throtl_tg_can_upgrade() argument
1818 struct throtl_service_queue *sq = &tg->service_queue; in throtl_tg_can_upgrade()
1825 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]; in throtl_tg_can_upgrade()
1826 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]; in throtl_tg_can_upgrade()
1837 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1838 throtl_tg_is_idle(tg)) in throtl_tg_can_upgrade()
1843 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg) in throtl_hierarchy_can_upgrade() argument
1846 if (throtl_tg_can_upgrade(tg)) in throtl_hierarchy_can_upgrade()
1848 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_upgrade()
1849 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_upgrade()
1869 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_can_upgrade() local
1871 if (tg == this_tg) in throtl_can_upgrade()
1873 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_can_upgrade()
1875 if (!throtl_hierarchy_can_upgrade(tg)) { in throtl_can_upgrade()
1884 static void throtl_upgrade_check(struct throtl_grp *tg) in throtl_upgrade_check() argument
1888 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1891 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1894 tg->last_check_time = now; in throtl_upgrade_check()
1897 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1900 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1901 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1915 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_upgrade_state() local
1916 struct throtl_service_queue *sq = &tg->service_queue; in throtl_upgrade_state()
1918 tg->disptime = jiffies - 1; in throtl_upgrade_state()
1942 static bool throtl_tg_can_downgrade(struct throtl_grp *tg) in throtl_tg_can_downgrade() argument
1944 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade()
1952 time_after_eq(now, tg_last_low_overflow_time(tg) + in throtl_tg_can_downgrade()
1954 (!throtl_tg_is_idle(tg) || in throtl_tg_can_downgrade()
1955 !list_empty(&tg_to_blkg(tg)->blkcg->css.children))) in throtl_tg_can_downgrade()
1960 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg) in throtl_hierarchy_can_downgrade() argument
1963 if (!throtl_tg_can_downgrade(tg)) in throtl_hierarchy_can_downgrade()
1965 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_downgrade()
1966 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_downgrade()
1972 static void throtl_downgrade_check(struct throtl_grp *tg) in throtl_downgrade_check() argument
1979 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
1980 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
1982 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_downgrade_check()
1984 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
1987 elapsed_time = now - tg->last_check_time; in throtl_downgrade_check()
1988 tg->last_check_time = now; in throtl_downgrade_check()
1990 if (time_before(now, tg_last_low_overflow_time(tg) + in throtl_downgrade_check()
1991 tg->td->throtl_slice)) in throtl_downgrade_check()
1994 if (tg->bps[READ][LIMIT_LOW]) { in throtl_downgrade_check()
1995 bps = tg->last_bytes_disp[READ] * HZ; in throtl_downgrade_check()
1997 if (bps >= tg->bps[READ][LIMIT_LOW]) in throtl_downgrade_check()
1998 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2001 if (tg->bps[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2002 bps = tg->last_bytes_disp[WRITE] * HZ; in throtl_downgrade_check()
2004 if (bps >= tg->bps[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2005 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2008 if (tg->iops[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2009 iops = tg->last_io_disp[READ] * HZ / elapsed_time; in throtl_downgrade_check()
2010 if (iops >= tg->iops[READ][LIMIT_LOW]) in throtl_downgrade_check()
2011 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2014 if (tg->iops[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2015 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; in throtl_downgrade_check()
2016 if (iops >= tg->iops[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2017 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2024 if (throtl_hierarchy_can_downgrade(tg)) in throtl_downgrade_check()
2025 throtl_downgrade_state(tg->td, LIMIT_LOW); in throtl_downgrade_check()
2027 tg->last_bytes_disp[READ] = 0; in throtl_downgrade_check()
2028 tg->last_bytes_disp[WRITE] = 0; in throtl_downgrade_check()
2029 tg->last_io_disp[READ] = 0; in throtl_downgrade_check()
2030 tg->last_io_disp[WRITE] = 0; in throtl_downgrade_check()
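
throtl_downgrade_check() above turns the per-window counters into rates (bytes * HZ / elapsed jiffies, and likewise for ios) and refreshes last_low_overflow_time whenever a group is still consuming at least its whole low limit, which defers the downgrade back to LIMIT_LOW. Millisecond-based sketch of the rate test:

    #include <stdint.h>
    #include <stdio.h>

    /* Achieved throughput in bytes/sec over the elapsed window. */
    static uint64_t window_bps(uint64_t bytes, uint64_t elapsed_ms)
    {
        return elapsed_ms ? bytes * 1000 / elapsed_ms : 0;
    }

    int main(void)
    {
        uint64_t low_limit = 1 << 20;              /* 1 MiB/s low limit */
        uint64_t bps = window_bps(600000, 500);    /* 600 kB in 0.5 s */
        if (bps >= low_limit)
            printf("low limit fully used: defer downgrade\n");
        else
            printf("under the low limit: %llu B/s\n",
                   (unsigned long long)bps);
        return 0;
    }

The last_* counters are then zeroed (lines 2027-2030) so each check window starts fresh.
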
2033 static void blk_throtl_update_idletime(struct throtl_grp *tg) in blk_throtl_update_idletime() argument
2036 unsigned long last_finish_time = tg->last_finish_time; in blk_throtl_update_idletime()
2039 last_finish_time == tg->checked_last_finish_time) in blk_throtl_update_idletime()
2042 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3; in blk_throtl_update_idletime()
2043 tg->checked_last_finish_time = last_finish_time; in blk_throtl_update_idletime()
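
blk_throtl_update_idletime() above maintains avg_idletime as an exponential moving average with weight 7/8: each new idle sample (the gap between the previous completion and this bio's arrival) contributes 1/8. Standalone version of the update:

    #include <stdio.h>

    /* avg <- (avg * 7 + sample) / 8, as in the fragment above. */
    static unsigned long ewma_7_8(unsigned long avg, unsigned long sample)
    {
        return (avg * 7 + sample) >> 3;
    }

    int main(void)
    {
        unsigned long avg = 0;
        unsigned long samples[] = { 800, 800, 100, 100, 100 };
        for (unsigned int i = 0; i < 5; i++) {
            avg = ewma_7_8(avg, samples[i]);
            printf("avg=%lu\n", avg);   /* decays toward recent samples */
        }
        return 0;
    }
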
2129 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) in blk_throtl_assoc_bio() argument
2133 if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV)) in blk_throtl_assoc_bio()
2143 struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg); in blk_throtl_bio() local
2147 struct throtl_data *td = tg->td; in blk_throtl_bio()
2152 if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw]) in blk_throtl_bio()
2162 blk_throtl_assoc_bio(tg, bio); in blk_throtl_bio()
2163 blk_throtl_update_idletime(tg); in blk_throtl_bio()
2165 sq = &tg->service_queue; in blk_throtl_bio()
2169 if (tg->last_low_overflow_time[rw] == 0) in blk_throtl_bio()
2170 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2171 throtl_downgrade_check(tg); in blk_throtl_bio()
2172 throtl_upgrade_check(tg); in blk_throtl_bio()
2178 if (!tg_may_dispatch(tg, bio, NULL)) { in blk_throtl_bio()
2179 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2180 if (throtl_can_upgrade(td, tg)) { in blk_throtl_bio()
2188 throtl_charge_bio(tg, bio); in blk_throtl_bio()
2201 throtl_trim_slice(tg, rw); in blk_throtl_bio()
2208 qn = &tg->qnode_on_parent[rw]; in blk_throtl_bio()
2210 tg = sq_to_tg(sq); in blk_throtl_bio()
2211 if (!tg) in blk_throtl_bio()
2218 tg->bytes_disp[rw], bio->bi_iter.bi_size, in blk_throtl_bio()
2219 tg_bps_limit(tg, rw), in blk_throtl_bio()
2220 tg->io_disp[rw], tg_iops_limit(tg, rw), in blk_throtl_bio()
2223 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2226 throtl_add_bio_tg(bio, qn, tg); in blk_throtl_bio()
2235 if (tg->flags & THROTL_TG_WAS_EMPTY) { in blk_throtl_bio()
2236 tg_update_disptime(tg); in blk_throtl_bio()
2237 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in blk_throtl_bio()
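
blk_throtl_bio() above is the entry point that ties the pieces together: bail out early if the bio is already throttled or the group has no rules, then walk up the tree. At each level, if tg_may_dispatch() says the bio fits, charge it and climb; the first level that cannot absorb it queues the bio there and schedules the pending timer. A compact model of that fit-then-climb loop (illustrative; the real code also trims slices and handles the THROTL_TG_WAS_EMPTY bookkeeping shown above):

    #include <stddef.h>
    #include <stdio.h>

    struct group {
        const char *name;
        struct group *parent;
        long budget;               /* bytes left in the current slice */
    };

    static void submit(struct group *tg, long bio_size)
    {
        while (tg) {
            if (tg->budget < bio_size) {   /* tg_may_dispatch() fails */
                printf("queued %ld bytes at %s\n", bio_size, tg->name);
                return;
            }
            tg->budget -= bio_size;        /* throtl_charge_bio() */
            tg = tg->parent;               /* climb one level */
        }
        printf("dispatched %ld bytes\n", bio_size);
    }

    int main(void)
    {
        struct group root  = { "root", NULL, 10000 };
        struct group child = { "child", &root, 4096 };
        submit(&child, 4096);   /* fits everywhere: dispatched */
        submit(&child, 4096);   /* child's slice exhausted: queued */
        return 0;
    }
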
2283 struct throtl_grp *tg; in blk_throtl_bio_endio() local
2293 tg = blkg_to_tg(blkg); in blk_throtl_bio_endio()
2296 tg->last_finish_time = finish_time_ns >> 10; in blk_throtl_bio_endio()
2306 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2309 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2314 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2315 tg->latency_target; in blk_throtl_bio_endio()
2317 tg->bad_bio_cnt++; in blk_throtl_bio_endio()
2322 tg->bio_cnt++; in blk_throtl_bio_endio()
2325 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) { in blk_throtl_bio_endio()
2326 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
2327 tg->bio_cnt /= 2; in blk_throtl_bio_endio()
2328 tg->bad_bio_cnt /= 2; in blk_throtl_bio_endio()
2340 struct throtl_grp *tg; in tg_drain_bios() local
2342 while ((tg = throtl_rb_first(parent_sq))) { in tg_drain_bios()
2343 struct throtl_service_queue *sq = &tg->service_queue; in tg_drain_bios()
2346 throtl_dequeue_tg(tg); in tg_drain_bios()
2349 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()
2351 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()