Lines matching refs:td (cross-references to the struct throtl_data pointer td in the blk-throttle code)

124 return tg->td; in sq_to_td()
137 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td) in throtl_adjusted_limit() argument
140 if (td->scale < 4096 && time_after_eq(jiffies, in throtl_adjusted_limit()
141 td->low_upgrade_time + td->scale * td->throtl_slice)) in throtl_adjusted_limit()
142 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice; in throtl_adjusted_limit()
144 return low + (low >> 1) * td->scale; in throtl_adjusted_limit()
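The matches above cover the whole of throtl_adjusted_limit(): td->scale counts how many throtl_slice intervals have passed since the last upgrade (capped at 4096), and the effective limit ramps up as low + (low >> 1) * scale. Below is a minimal user-space model of that ramp; the slice length, low limit and elapsed times are made-up illustration values, not kernel defaults.

#include <stdint.h>
#include <stdio.h>

/* Model of the LIMIT_LOW -> LIMIT_MAX ramp: every throtl_slice after an
 * upgrade, scale grows by one (capped) and the effective limit becomes
 * low + (low >> 1) * scale.
 */
static uint64_t adjusted_limit(uint64_t low, unsigned long scale)
{
	return low + (low >> 1) * scale;
}

int main(void)
{
	uint64_t low_bps = 10 * 1024 * 1024;	/* hypothetical 10 MiB/s low limit */
	unsigned long throtl_slice_ms = 100;	/* hypothetical slice length */

	for (unsigned long elapsed = 0; elapsed <= 500; elapsed += 100) {
		unsigned long scale = elapsed / throtl_slice_ms;

		if (scale > 4096)	/* same cap as the matched code */
			scale = 4096;
		printf("elapsed %lums -> scale %lu -> %llu bytes/s\n",
		       elapsed, scale,
		       (unsigned long long)adjusted_limit(low_bps, scale));
	}
	return 0;
}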
150 struct throtl_data *td; in tg_bps_limit() local
156 td = tg->td; in tg_bps_limit()
157 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
158 if (ret == 0 && td->limit_index == LIMIT_LOW) { in tg_bps_limit()
161 tg->iops[rw][td->limit_index]) in tg_bps_limit()
167 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
171 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
180 struct throtl_data *td; in tg_iops_limit() local
186 td = tg->td; in tg_iops_limit()
187 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
188 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
191 tg->bps[rw][td->limit_index]) in tg_iops_limit()
197 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
201 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
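tg_bps_limit() and tg_iops_limit() above pick the limit for the current td->limit_index: an unset (zero) LIMIT_LOW value falls back to "unlimited" for intermediate nodes or a small floor otherwise, and while running at LIMIT_MAX a configured low limit is ramped through throtl_adjusted_limit() and clamped to the max limit. A compact user-space sketch of the bps-side selection follows; BPS_UNLIMITED, the MIN_THROTL_BPS value and the has_children flag are stand-ins, and the extra iops check on the zero-limit path is omitted for brevity.

#include <stdint.h>
#include <stdio.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

/* Hypothetical stand-ins for the kernel's constants. */
#define BPS_UNLIMITED   UINT64_MAX
#define MIN_THROTL_BPS  1024ULL

struct group_model {
	uint64_t bps[LIMIT_CNT];	/* configured low/max byte limits */
	int has_children;		/* models !list_empty(&blkcg->css.children) */
	unsigned long scale;		/* ramp factor from the upgrade logic */
};

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* Model of the limit selection in tg_bps_limit() for one direction
 * (simplified: the kernel also checks whether the iops limit is set).
 */
static uint64_t bps_limit(const struct group_model *g, int limit_index)
{
	uint64_t ret = g->bps[limit_index];

	if (ret == 0 && limit_index == LIMIT_LOW)
		/* unset low limit: don't throttle intermediate nodes */
		return g->has_children ? BPS_UNLIMITED : MIN_THROTL_BPS;

	if (limit_index == LIMIT_MAX && g->bps[LIMIT_LOW] &&
	    g->bps[LIMIT_LOW] != g->bps[LIMIT_MAX]) {
		/* ramp the low limit up, never beyond the max limit */
		uint64_t adjusted = g->bps[LIMIT_LOW] +
				    (g->bps[LIMIT_LOW] >> 1) * g->scale;
		ret = min_u64(g->bps[LIMIT_MAX], adjusted);
	}
	return ret;
}

int main(void)
{
	struct group_model g = {
		.bps = { [LIMIT_LOW] = 1 << 20, [LIMIT_MAX] = 8 << 20 },
		.scale = 3,
	};

	printf("low mode: %llu\n", (unsigned long long)bps_limit(&g, LIMIT_LOW));
	printf("max mode: %llu\n", (unsigned long long)bps_limit(&g, LIMIT_MAX));
	return 0;
}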
391 struct throtl_data *td = blkg->q->td; in throtl_pd_init() local
407 sq->parent_sq = &td->service_queue; in throtl_pd_init()
410 tg->td = td; in throtl_pd_init()
421 struct throtl_data *td = tg->td; in tg_update_has_rules() local
427 (td->limit_valid[td->limit_index] && in tg_update_has_rules()
431 (td->limit_valid[td->limit_index] && in tg_update_has_rules()
447 static void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
454 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in blk_throtl_update_limit_valid()
465 td->limit_valid[LIMIT_LOW] = low_valid; in blk_throtl_update_limit_valid()
468 static inline void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
473 static void throtl_upgrade_state(struct throtl_data *td);
483 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
485 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
486 throtl_upgrade_state(tg->td); in throtl_pd_offline()
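throtl_pd_offline() above clears the departing group's low limits, recomputes td->limit_valid[LIMIT_LOW] across the hierarchy via blk_throtl_update_limit_valid(), and upgrades to LIMIT_MAX if no group has a low limit left. A small user-space model of that revalidation, with a flat array standing in for the blkg_for_each_descendant_post() walk:

#include <stdbool.h>
#include <stdio.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };
enum { READ, WRITE };

struct grp_model {
	unsigned long long bps[2][LIMIT_CNT];
	unsigned int iops[2][LIMIT_CNT];
};

struct td_model {
	bool limit_valid[LIMIT_CNT];
	int limit_index;
};

/* Model of blk_throtl_update_limit_valid(): LIMIT_LOW stays valid only
 * while at least one group in the hierarchy still has a low limit set.
 */
static void update_limit_valid(struct td_model *td,
			       const struct grp_model *grps, int n)
{
	bool low_valid = false;

	for (int i = 0; i < n; i++) {
		const struct grp_model *tg = &grps[i];

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
			low_valid = true;
			break;
		}
	}
	td->limit_valid[LIMIT_LOW] = low_valid;
}

int main(void)
{
	static struct grp_model grps[2];	/* all low limits cleared */
	struct td_model td = { .limit_index = LIMIT_LOW };

	update_limit_valid(&td, grps, 2);
	if (!td.limit_valid[td.limit_index])
		td.limit_index = LIMIT_MAX;	/* mirrors the upgrade in throtl_pd_offline() */

	printf("limit_index = %s\n",
	       td.limit_index == LIMIT_MAX ? "LIMIT_MAX" : "LIMIT_LOW");
	return 0;
}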
650 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
663 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
678 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
724 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
728 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
732 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
736 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
752 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
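throtl_trim_slice() above forgets budget from completed slices so an idle group cannot hoard credit: only whole throtl_slice intervals are trimmed, the dispatched byte/io counters are reduced by what the limits would have allowed over those intervals, and slice_start advances by the trimmed time. A worked user-space model of that arithmetic, with assumed HZ, slice length and limits:

#include <stdint.h>
#include <stdio.h>

#define HZ 1000UL	/* assumed tick rate for the example */

int main(void)
{
	unsigned long throtl_slice = 100;	/* jiffies per slice (hypothetical) */
	unsigned long time_elapsed = 350;	/* jiffies since slice_start */
	uint64_t bps_limit = 1 << 20;		/* 1 MiB/s */
	unsigned int iops_limit = 100;

	/* Only whole slices are trimmed; the partial slice keeps its budget. */
	unsigned long nr_slices = time_elapsed / throtl_slice;
	uint64_t bytes_trim = bps_limit * throtl_slice * nr_slices / HZ;
	unsigned long io_trim = (uint64_t)iops_limit * throtl_slice * nr_slices / HZ;

	printf("trim %lu slices: %llu bytes, %lu ios; slice_start advances %lu jiffies\n",
	       nr_slices, (unsigned long long)bytes_trim, io_trim,
	       nr_slices * throtl_slice);
	return 0;
}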
840 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); in tg_within_iops_limit()
876 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_within_bps_limit()
878 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_within_bps_limit()
944 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
946 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
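The budget checks above (tg_within_bps_limit()/tg_within_iops_limit()) round the elapsed time up to a whole throtl_slice before computing the allowed bytes or ios, and tg_may_dispatch() extends the slice so it always reaches at least one throtl_slice into the future. A minimal model of the bps-side check; HZ, the limits and the bio size are illustrative, and the kernel helper uses do_div() rather than a plain '/' for the 64-bit division:

#include <stdint.h>
#include <stdio.h>

#define HZ 1000UL	/* assumed tick rate */

static unsigned long roundup_to(unsigned long x, unsigned long step)
{
	return ((x + step - 1) / step) * step;
}

int main(void)
{
	unsigned long throtl_slice = 100;	/* jiffies (hypothetical) */
	unsigned long jiffy_elapsed = 40;	/* time since slice_start */
	uint64_t bps_limit = 1 << 20;		/* 1 MiB/s */
	uint64_t bytes_disp = 30 * 1024;	/* already dispatched this slice */
	uint64_t bio_size = 256 * 1024;		/* next bio */

	/* A fresh or short slice is still charged for at least one full slice. */
	unsigned long rnd = roundup_to(jiffy_elapsed ? jiffy_elapsed : throtl_slice,
				       throtl_slice);
	uint64_t bytes_allowed = bps_limit * rnd / HZ;

	if (bytes_disp + bio_size <= bytes_allowed) {
		printf("dispatch now (%llu <= %llu)\n",
		       (unsigned long long)(bytes_disp + bio_size),
		       (unsigned long long)bytes_allowed);
	} else {
		uint64_t extra = bytes_disp + bio_size - bytes_allowed;
		unsigned long wait = (unsigned long)(extra * HZ / bps_limit);

		printf("over budget by %llu bytes, wait ~%lu jiffies\n",
		       (unsigned long long)extra, wait);
	}
	return 0;
}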
1084 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1085 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1160 static bool throtl_can_upgrade(struct throtl_data *td,
1181 struct throtl_data *td = sq_to_td(sq); in throtl_pending_timer_fn() local
1191 q = td->queue; in throtl_pending_timer_fn()
1198 if (throtl_can_upgrade(td, NULL)) in throtl_pending_timer_fn()
1199 throtl_upgrade_state(td); in throtl_pending_timer_fn()
1241 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_pending_timer_fn()
1257 struct throtl_data *td = container_of(work, struct throtl_data, in blk_throtl_dispatch_work_fn() local
1259 struct throtl_service_queue *td_sq = &td->service_queue; in blk_throtl_dispatch_work_fn()
1260 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1337 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1667 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1668 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1670 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1672 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1674 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
1703 struct throtl_data *td = q->td; in throtl_shutdown_wq() local
1705 cancel_work_sync(&td->dispatch_work); in throtl_shutdown_wq()
1815 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
1840 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1858 static bool throtl_can_upgrade(struct throtl_data *td, in throtl_can_upgrade() argument
1864 if (td->limit_index != LIMIT_LOW) in throtl_can_upgrade()
1867 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice)) in throtl_can_upgrade()
1871 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_can_upgrade()
1891 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1894 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1900 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1903 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1904 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
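throtl_can_upgrade() and throtl_upgrade_check() above gate the switch back to LIMIT_MAX: an upgrade is only considered while running at LIMIT_LOW, never within one throtl_slice of the last downgrade, and only when the per-group checks (not fully matched here) also pass. A tiny model of just that td-level gating; the kernel uses time_before() so the comparison survives jiffies wraparound, which the plain '<' below does not:

#include <stdbool.h>
#include <stdio.h>

enum { LIMIT_LOW, LIMIT_MAX };

struct td_model {
	int limit_index;
	unsigned long low_downgrade_time;
	unsigned long throtl_slice;
};

/* Models only the td-level gating in throtl_can_upgrade(); the per-group
 * conditions in throtl_tg_can_upgrade() are not modelled here.
 */
static bool can_upgrade(const struct td_model *td, unsigned long now)
{
	if (td->limit_index != LIMIT_LOW)
		return false;
	if (now < td->low_downgrade_time + td->throtl_slice)
		return false;	/* too soon after the last downgrade */
	return true;
}

int main(void)
{
	struct td_model td = {
		.limit_index = LIMIT_LOW,
		.low_downgrade_time = 1000,
		.throtl_slice = 100,
	};

	printf("at 1050: %d, at 1100: %d\n",
	       can_upgrade(&td, 1050), can_upgrade(&td, 1100));
	return 0;
}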
1907 static void throtl_upgrade_state(struct throtl_data *td) in throtl_upgrade_state() argument
1912 throtl_log(&td->service_queue, "upgrade to max"); in throtl_upgrade_state()
1913 td->limit_index = LIMIT_MAX; in throtl_upgrade_state()
1914 td->low_upgrade_time = jiffies; in throtl_upgrade_state()
1915 td->scale = 0; in throtl_upgrade_state()
1917 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_upgrade_state()
1926 throtl_select_dispatch(&td->service_queue); in throtl_upgrade_state()
1927 throtl_schedule_next_dispatch(&td->service_queue, true); in throtl_upgrade_state()
1928 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_upgrade_state()
1931 static void throtl_downgrade_state(struct throtl_data *td) in throtl_downgrade_state() argument
1933 td->scale /= 2; in throtl_downgrade_state()
1935 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale); in throtl_downgrade_state()
1936 if (td->scale) { in throtl_downgrade_state()
1937 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; in throtl_downgrade_state()
1941 td->limit_index = LIMIT_LOW; in throtl_downgrade_state()
1942 td->low_downgrade_time = jiffies; in throtl_downgrade_state()
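throtl_upgrade_state() and throtl_downgrade_state() above form the limit state machine: an upgrade switches to LIMIT_MAX, records low_upgrade_time and restarts the ramp at scale 0, while a downgrade halves the scale and only falls back to LIMIT_LOW once the scale reaches zero, otherwise it merely backdates low_upgrade_time to shrink the ramp. A user-space model of those transitions with made-up timestamps:

#include <stdio.h>

enum { LIMIT_LOW, LIMIT_MAX };

struct td_model {
	int limit_index;
	unsigned int scale;
	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;
	unsigned long throtl_slice;
};

/* Mirrors throtl_upgrade_state(): restart the ramp from the low limit. */
static void upgrade(struct td_model *td, unsigned long now)
{
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = now;
	td->scale = 0;
}

/* Mirrors throtl_downgrade_state(): halve the ramp; only fall all the
 * way back to LIMIT_LOW once the ramp has collapsed to zero.
 */
static void downgrade(struct td_model *td, unsigned long now)
{
	td->scale /= 2;
	if (td->scale) {
		td->low_upgrade_time = now - td->scale * td->throtl_slice;
		return;
	}
	td->limit_index = LIMIT_LOW;
	td->low_downgrade_time = now;
}

int main(void)
{
	struct td_model td = { .limit_index = LIMIT_LOW, .throtl_slice = 100 };
	unsigned long now = 10000;

	upgrade(&td, now);
	td.scale = 4;			/* pretend the ramp ran for four slices */
	downgrade(&td, now + 400);	/* scale 4 -> 2, still LIMIT_MAX */
	downgrade(&td, now + 500);	/* scale 2 -> 1, still LIMIT_MAX */
	downgrade(&td, now + 600);	/* scale 1 -> 0, back to LIMIT_LOW */
	printf("index=%s scale=%u\n",
	       td.limit_index == LIMIT_MAX ? "LIMIT_MAX" : "LIMIT_LOW", td.scale);
	return 0;
}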
1947 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade() local
1954 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) && in throtl_tg_can_downgrade()
1956 td->throtl_slice) && in throtl_tg_can_downgrade()
1982 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
1983 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
1987 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
1994 tg->td->throtl_slice)) in throtl_downgrade_check()
2028 throtl_downgrade_state(tg->td); in throtl_downgrade_check()
2053 static void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2060 if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW]) in throtl_update_latency_buckets()
2062 if (time_before(jiffies, td->last_calculate_time + HZ)) in throtl_update_latency_buckets()
2064 td->last_calculate_time = jiffies; in throtl_update_latency_buckets()
2069 struct latency_bucket *tmp = &td->tmp_buckets[rw][i]; in throtl_update_latency_buckets()
2075 bucket = per_cpu_ptr(td->latency_buckets[rw], in throtl_update_latency_buckets()
2101 if (td->avg_buckets[rw][i].latency < last_latency[rw]) in throtl_update_latency_buckets()
2102 td->avg_buckets[rw][i].latency = in throtl_update_latency_buckets()
2107 if (!td->avg_buckets[rw][i].valid) in throtl_update_latency_buckets()
2110 latency[rw] = (td->avg_buckets[rw][i].latency * 7 + in throtl_update_latency_buckets()
2113 td->avg_buckets[rw][i].latency = max(latency[rw], in throtl_update_latency_buckets()
2115 td->avg_buckets[rw][i].valid = true; in throtl_update_latency_buckets()
2116 last_latency[rw] = td->avg_buckets[rw][i].latency; in throtl_update_latency_buckets()
2121 throtl_log(&td->service_queue, in throtl_update_latency_buckets()
2124 td->avg_buckets[READ][i].latency, in throtl_update_latency_buckets()
2125 td->avg_buckets[READ][i].valid, in throtl_update_latency_buckets()
2126 td->avg_buckets[WRITE][i].latency, in throtl_update_latency_buckets()
2127 td->avg_buckets[WRITE][i].valid); in throtl_update_latency_buckets()
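throtl_update_latency_buckets() above refreshes the per-size latency baseline at most once per second: each bucket's average is blended as 7/8 old + 1/8 new, and the result is clamped so buckets for larger IOs never end up with a smaller baseline than the smaller ones. A sketch of that blending and clamp; the bucket count and sample values are invented, and the seeding of a not-yet-valid bucket is simplified here:

#include <stdbool.h>
#include <stdio.h>

#define NR_BUCKETS 4	/* per-size latency buckets (count is illustrative) */

struct bucket_model {
	unsigned long latency;	/* running average, in usecs */
	bool valid;
};

int main(void)
{
	static struct bucket_model avg[NR_BUCKETS];
	/* Freshly sampled average latency per size bucket (made-up numbers). */
	unsigned long sample[NR_BUCKETS] = { 80, 120, 90, 300 };
	unsigned long last_latency = 0;

	for (int i = 0; i < NR_BUCKETS; i++) {
		unsigned long latency;

		/* First sample seeds the average; later ones blend 7/8 old + 1/8 new. */
		if (!avg[i].valid)
			latency = sample[i];
		else
			latency = (avg[i].latency * 7 + sample[i]) >> 3;

		/* Larger-IO buckets never get a smaller baseline than smaller ones. */
		avg[i].latency = latency > last_latency ? latency : last_latency;
		avg[i].valid = true;
		last_latency = avg[i].latency;

		printf("bucket %d: avg %lu us\n", i, avg[i].latency);
	}
	return 0;
}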
2130 static inline void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2146 static bool throtl_can_upgrade(struct throtl_data *td, in throtl_can_upgrade() argument
2152 static void throtl_upgrade_state(struct throtl_data *td) in throtl_upgrade_state() argument
2166 struct throtl_data *td = tg->td; in __blk_throtl_bio() local
2178 throtl_update_latency_buckets(td); in __blk_throtl_bio()
2197 if (throtl_can_upgrade(td, tg)) { in __blk_throtl_bio()
2198 throtl_upgrade_state(td); in __blk_throtl_bio()
2244 td->nr_queued[rw]++; in __blk_throtl_bio()
2261 if (throttled || !td->track_bio_latency) in __blk_throtl_bio()
2271 static void throtl_track_latency(struct throtl_data *td, sector_t size, in throtl_track_latency() argument
2278 if (!td || td->limit_index != LIMIT_LOW || in throtl_track_latency()
2280 !blk_queue_nonrot(td->queue)) in throtl_track_latency()
2285 latency = get_cpu_ptr(td->latency_buckets[rw]); in throtl_track_latency()
2288 put_cpu_ptr(td->latency_buckets[rw]); in throtl_track_latency()
2294 struct throtl_data *td = q->td; in blk_throtl_stat_add() local
2296 throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq), in blk_throtl_stat_add()
2314 if (!tg->td->limit_valid[LIMIT_LOW]) in blk_throtl_bio_endio()
2328 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2331 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2336 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2348 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
2358 struct throtl_data *td; in blk_throtl_init() local
2361 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); in blk_throtl_init()
2362 if (!td) in blk_throtl_init()
2364 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2366 if (!td->latency_buckets[READ]) { in blk_throtl_init()
2367 kfree(td); in blk_throtl_init()
2370 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2372 if (!td->latency_buckets[WRITE]) { in blk_throtl_init()
2373 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2374 kfree(td); in blk_throtl_init()
2378 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); in blk_throtl_init()
2379 throtl_service_queue_init(&td->service_queue); in blk_throtl_init()
2381 q->td = td; in blk_throtl_init()
2382 td->queue = q; in blk_throtl_init()
2384 td->limit_valid[LIMIT_MAX] = true; in blk_throtl_init()
2385 td->limit_index = LIMIT_MAX; in blk_throtl_init()
2386 td->low_upgrade_time = jiffies; in blk_throtl_init()
2387 td->low_downgrade_time = jiffies; in blk_throtl_init()
2392 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2393 free_percpu(td->latency_buckets[WRITE]); in blk_throtl_init()
2394 kfree(td); in blk_throtl_init()
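blk_throtl_init() above allocates the throtl_data plus two per-cpu latency bucket arrays and unwinds in reverse order on any failure, as the trailing free_percpu()/kfree() matches show. A user-space sketch of that allocate-then-unwind shape, using calloc/free in place of kzalloc_node()/__alloc_percpu(); the 4096-byte size is arbitrary:

#include <stdlib.h>
#include <stdio.h>

struct td_model {
	void *latency_buckets_read;
	void *latency_buckets_write;
};

/* Same shape as the error handling in blk_throtl_init(): every allocation
 * that can fail frees whatever was already allocated, in reverse order.
 */
static struct td_model *td_alloc(void)
{
	struct td_model *td = calloc(1, sizeof(*td));

	if (!td)
		return NULL;

	td->latency_buckets_read = calloc(1, 4096);
	if (!td->latency_buckets_read)
		goto err_td;

	td->latency_buckets_write = calloc(1, 4096);
	if (!td->latency_buckets_write)
		goto err_read;

	return td;

err_read:
	free(td->latency_buckets_read);
err_td:
	free(td);
	return NULL;
}

int main(void)
{
	struct td_model *td = td_alloc();

	printf("init %s\n", td ? "ok" : "failed");
	if (td) {
		free(td->latency_buckets_write);
		free(td->latency_buckets_read);
		free(td);
	}
	return 0;
}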
2403 BUG_ON(!q->td); in blk_throtl_exit()
2404 del_timer_sync(&q->td->service_queue.pending_timer); in blk_throtl_exit()
2407 free_percpu(q->td->latency_buckets[READ]); in blk_throtl_exit()
2408 free_percpu(q->td->latency_buckets[WRITE]); in blk_throtl_exit()
2409 kfree(q->td); in blk_throtl_exit()
2415 struct throtl_data *td; in blk_throtl_register() local
2418 td = q->td; in blk_throtl_register()
2419 BUG_ON(!td); in blk_throtl_register()
2422 td->throtl_slice = DFL_THROTL_SLICE_SSD; in blk_throtl_register()
2423 td->filtered_latency = LATENCY_FILTERED_SSD; in blk_throtl_register()
2425 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register()
2426 td->filtered_latency = LATENCY_FILTERED_HD; in blk_throtl_register()
2428 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register()
2429 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register()
2434 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register()
2437 td->track_bio_latency = !queue_is_mq(q); in blk_throtl_register()
2438 if (!td->track_bio_latency) in blk_throtl_register()
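blk_throtl_register() above picks the defaults per device type: non-rotational queues get the SSD slice length and latency filter, rotational ones get the HD values plus a baseline latency seeded into every avg bucket, and per-bio latency tracking is only enabled when the queue is not blk-mq. A sketch of that selection with stand-in constants (the real DFL_THROTL_SLICE_*, LATENCY_FILTERED_* and DFL_HD_BASELINE_LATENCY values live in blk-throttle.c):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in values, not the kernel's defaults. */
#define SLICE_SSD	20
#define SLICE_HD	100
#define FILTER_SSD	0
#define FILTER_HD	1000
#define HD_BASELINE	4000
#define NR_BUCKETS	4

struct td_model {
	unsigned long throtl_slice;
	unsigned long filtered_latency;
	unsigned long avg_latency[2][NR_BUCKETS];	/* READ/WRITE baselines */
};

/* Models the device-type defaults chosen in blk_throtl_register(). */
static void td_register(struct td_model *td, bool nonrot)
{
	if (nonrot) {
		td->throtl_slice = SLICE_SSD;
		td->filtered_latency = FILTER_SSD;
	} else {
		td->throtl_slice = SLICE_HD;
		td->filtered_latency = FILTER_HD;
		for (int i = 0; i < NR_BUCKETS; i++) {
			td->avg_latency[0][i] = HD_BASELINE;
			td->avg_latency[1][i] = HD_BASELINE;
		}
	}
}

int main(void)
{
	struct td_model ssd = {0}, hdd = {0};

	td_register(&ssd, true);
	td_register(&hdd, false);
	printf("ssd slice %lu, hdd slice %lu (baseline %lu)\n",
	       ssd.throtl_slice, hdd.throtl_slice, hdd.avg_latency[0][0]);
	return 0;
}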
2445 if (!q->td) in blk_throtl_sample_time_show()
2447 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice)); in blk_throtl_sample_time_show()
2456 if (!q->td) in blk_throtl_sample_time_store()
2463 q->td->throtl_slice = t; in blk_throtl_sample_time_store()