Lines matching refs:td (references to struct throtl_data *td in block/blk-throttle.c)

115 struct throtl_data *td;  member
270 return tg->td; in sq_to_td()
283 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td) in throtl_adjusted_limit() argument
286 if (td->scale < 4096 && time_after_eq(jiffies, in throtl_adjusted_limit()
287 td->low_upgrade_time + td->scale * td->throtl_slice)) in throtl_adjusted_limit()
288 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice; in throtl_adjusted_limit()
290 return low + (low >> 1) * td->scale; in throtl_adjusted_limit()
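The throtl_adjusted_limit() hits above (lines 283-290) show how a LIMIT_LOW value is ramped up over time: td->scale counts elapsed throtl_slice periods since the last upgrade, and the effective limit grows by half of the low limit per step, with the kernel no longer growing the step count once it reaches 4096. A minimal userspace sketch of that arithmetic, with jiffies and the slice length passed in as plain integers; the function and parameter names are illustrative, not the kernel's, and the hard cap is only an approximation of the kernel's "recompute while scale < 4096" condition:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Userspace approximation of throtl_adjusted_limit(): the effective
     * limit starts at `low` and gains low/2 for every throtl_slice that has
     * elapsed since the last upgrade; the step count is capped here.
     */
    static uint64_t adjusted_limit(uint64_t low, unsigned long now,
                                   unsigned long low_upgrade_time,
                                   unsigned int throtl_slice)
    {
        unsigned long scale = (now - low_upgrade_time) / throtl_slice;

        if (scale > 4096)
            scale = 4096;
        return low + (low >> 1) * scale;
    }

    int main(void)
    {
        /* 10 MB/s low limit, 100-jiffy slice, 3 slices after the upgrade. */
        printf("%llu\n", (unsigned long long)
               adjusted_limit(10 * 1024 * 1024, 1300, 1000, 100));
        return 0;
    }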
296 struct throtl_data *td; in tg_bps_limit() local
302 td = tg->td; in tg_bps_limit()
303 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
304 if (ret == 0 && td->limit_index == LIMIT_LOW) { in tg_bps_limit()
307 tg->iops[rw][td->limit_index]) in tg_bps_limit()
313 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
317 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
326 struct throtl_data *td; in tg_iops_limit() local
332 td = tg->td; in tg_iops_limit()
333 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
334 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
337 tg->bps[rw][td->limit_index]) in tg_iops_limit()
343 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
347 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
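The tg_bps_limit()/tg_iops_limit() hits show that while the queue runs in the LIMIT_MAX state with a distinct low limit configured (lines 313-317 and 343-347), the configured max is combined with the ramped-up low limit from throtl_adjusted_limit(), so full throughput is restored gradually after an upgrade rather than all at once. A sketch of that combination, under the assumption that the truncated lines take the minimum of the configured max and the ramped low value; the helper and its parameters are illustrative:

    #include <stdint.h>

    #define LIMIT_LOW 0
    #define LIMIT_MAX 1

    /*
     * Illustrative effective byte limit while in the LIMIT_MAX state: if a
     * low limit exists and differs from the max, clamp the max to whatever
     * the ramp (see adjusted_limit() above) currently allows.
     */
    static uint64_t effective_bps(const uint64_t bps[2], int limit_index,
                                  uint64_t ramped_low)
    {
        uint64_t ret = bps[limit_index];

        if (limit_index == LIMIT_MAX && bps[LIMIT_LOW] &&
            bps[LIMIT_LOW] != bps[LIMIT_MAX] && ramped_low < ret)
            ret = ramped_low;
        return ret;
    }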
521 struct throtl_data *td = blkg->q->td; in throtl_pd_init() local
537 sq->parent_sq = &td->service_queue; in throtl_pd_init()
540 tg->td = td; in throtl_pd_init()
551 struct throtl_data *td = tg->td; in tg_update_has_rules() local
556 (td->limit_valid[td->limit_index] && in tg_update_has_rules()
571 static void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
578 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in blk_throtl_update_limit_valid()
589 td->limit_valid[LIMIT_LOW] = low_valid; in blk_throtl_update_limit_valid()
592 static void throtl_upgrade_state(struct throtl_data *td);
602 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
604 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
605 throtl_upgrade_state(tg->td); in throtl_pd_offline()
783 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
795 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
805 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
811 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_extend_slice()
851 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
855 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
859 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
863 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
879 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
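The throtl_trim_slice() hits (lines 851-879) show how the accounting window is shortened once I/O has been conforming for a while: the elapsed time is converted into whole slices, each slice's byte and IO budget (limit * throtl_slice / HZ) is subtracted from the dispatched counters, and slice_start is advanced past the trimmed slices. A userspace sketch of the byte-side arithmetic, with HZ fixed at 1000 for the example and all names illustrative:

    #include <stdint.h>

    #define HZ 1000  /* assumed tick rate for this example */

    struct slice_state {
        uint64_t bytes_disp;        /* bytes dispatched in the current window */
        unsigned long slice_start;  /* jiffies */
    };

    /*
     * Sketch of the trim step: drop whole elapsed slices from the window,
     * subtracting the bytes those slices were entitled to dispatch.
     */
    static void trim_slice(struct slice_state *s, unsigned long now,
                           unsigned int throtl_slice, uint64_t bps_limit)
    {
        unsigned long nr_slices = (now - s->slice_start) / throtl_slice;
        uint64_t bytes_trim;

        if (!nr_slices)
            return;
        bytes_trim = bps_limit * throtl_slice * nr_slices / HZ;

        if (s->bytes_disp >= bytes_trim)
            s->bytes_disp -= bytes_trim;
        else
            s->bytes_disp = 0;

        s->slice_start += nr_slices * throtl_slice;
    }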
899 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_iops_limit()
901 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_iops_limit()
944 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
946 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
1013 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
1015 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1158 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1159 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1230 static bool throtl_can_upgrade(struct throtl_data *td,
1251 struct throtl_data *td = sq_to_td(sq); in throtl_pending_timer_fn() local
1252 struct request_queue *q = td->queue; in throtl_pending_timer_fn()
1258 if (throtl_can_upgrade(td, NULL)) in throtl_pending_timer_fn()
1259 throtl_upgrade_state(td); in throtl_pending_timer_fn()
1301 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_pending_timer_fn()
1317 struct throtl_data *td = container_of(work, struct throtl_data, in blk_throtl_dispatch_work_fn() local
1319 struct throtl_service_queue *td_sq = &td->service_queue; in blk_throtl_dispatch_work_fn()
1320 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1397 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1699 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1700 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1702 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1704 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1706 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
1735 struct throtl_data *td = q->td; in throtl_shutdown_wq() local
1737 cancel_work_sync(&td->dispatch_work); in throtl_shutdown_wq()
1812 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
1837 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1855 static bool throtl_can_upgrade(struct throtl_data *td, in throtl_can_upgrade() argument
1861 if (td->limit_index != LIMIT_LOW) in throtl_can_upgrade()
1864 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice)) in throtl_can_upgrade()
1868 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_can_upgrade()
1888 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1891 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1897 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1900 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1901 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1904 static void throtl_upgrade_state(struct throtl_data *td) in throtl_upgrade_state() argument
1909 throtl_log(&td->service_queue, "upgrade to max"); in throtl_upgrade_state()
1910 td->limit_index = LIMIT_MAX; in throtl_upgrade_state()
1911 td->low_upgrade_time = jiffies; in throtl_upgrade_state()
1912 td->scale = 0; in throtl_upgrade_state()
1914 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_upgrade_state()
1923 throtl_select_dispatch(&td->service_queue); in throtl_upgrade_state()
1924 throtl_schedule_next_dispatch(&td->service_queue, true); in throtl_upgrade_state()
1925 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_upgrade_state()
1928 static void throtl_downgrade_state(struct throtl_data *td, int new) in throtl_downgrade_state() argument
1930 td->scale /= 2; in throtl_downgrade_state()
1932 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale); in throtl_downgrade_state()
1933 if (td->scale) { in throtl_downgrade_state()
1934 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; in throtl_downgrade_state()
1938 td->limit_index = new; in throtl_downgrade_state()
1939 td->low_downgrade_time = jiffies; in throtl_downgrade_state()
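throtl_upgrade_state() and throtl_downgrade_state() (lines 1904-1939) form the hysteresis between the two limit states: an upgrade jumps straight to LIMIT_MAX and resets the ramp, while each downgrade first halves td->scale and only falls back to the low limit once the scale reaches zero, back-dating low_upgrade_time in the meantime so a later ramp resumes from the halved scale. A sketch of the downgrade step under those assumptions; the state struct and names are illustrative:

    #define LIMIT_LOW 0
    #define LIMIT_MAX 1

    struct throtl_state {
        unsigned int scale;
        unsigned int limit_index;          /* LIMIT_LOW or LIMIT_MAX */
        unsigned long low_upgrade_time;
        unsigned long low_downgrade_time;
    };

    /*
     * Each call halves the ramp factor; only when it reaches zero does the
     * queue actually switch to the new (lower) limit index.
     */
    static void downgrade_state(struct throtl_state *st, unsigned int new_index,
                                unsigned long now, unsigned int throtl_slice)
    {
        st->scale /= 2;
        if (st->scale) {
            st->low_upgrade_time = now - st->scale * throtl_slice;
            return;
        }
        st->limit_index = new_index;
        st->low_downgrade_time = now;
    }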
1944 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade() local
1951 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) && in throtl_tg_can_downgrade()
1953 td->throtl_slice) && in throtl_tg_can_downgrade()
1979 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
1980 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
1984 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
1991 tg->td->throtl_slice)) in throtl_downgrade_check()
2025 throtl_downgrade_state(tg->td, LIMIT_LOW); in throtl_downgrade_check()
2047 static void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2054 if (!blk_queue_nonrot(td->queue)) in throtl_update_latency_buckets()
2056 if (time_before(jiffies, td->last_calculate_time + HZ)) in throtl_update_latency_buckets()
2058 td->last_calculate_time = jiffies; in throtl_update_latency_buckets()
2063 struct latency_bucket *tmp = &td->tmp_buckets[rw][i]; in throtl_update_latency_buckets()
2069 bucket = per_cpu_ptr(td->latency_buckets[rw], in throtl_update_latency_buckets()
2095 if (td->avg_buckets[rw][i].latency < last_latency[rw]) in throtl_update_latency_buckets()
2096 td->avg_buckets[rw][i].latency = in throtl_update_latency_buckets()
2101 if (!td->avg_buckets[rw][i].valid) in throtl_update_latency_buckets()
2104 latency[rw] = (td->avg_buckets[rw][i].latency * 7 + in throtl_update_latency_buckets()
2107 td->avg_buckets[rw][i].latency = max(latency[rw], in throtl_update_latency_buckets()
2109 td->avg_buckets[rw][i].valid = true; in throtl_update_latency_buckets()
2110 last_latency[rw] = td->avg_buckets[rw][i].latency; in throtl_update_latency_buckets()
2115 throtl_log(&td->service_queue, in throtl_update_latency_buckets()
2118 td->avg_buckets[READ][i].latency, in throtl_update_latency_buckets()
2119 td->avg_buckets[READ][i].valid, in throtl_update_latency_buckets()
2120 td->avg_buckets[WRITE][i].latency, in throtl_update_latency_buckets()
2121 td->avg_buckets[WRITE][i].valid); in throtl_update_latency_buckets()
2124 static inline void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
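The throtl_update_latency_buckets() hits (lines 2047-2121) describe how per-size latency baselines are maintained on non-rotational devices: at most once per second, per-CPU samples are folded into each bucket with a 7:1 exponential moving average (line 2104), and the buckets are kept monotonically non-decreasing across I/O sizes (lines 2095-2096 and 2107). A sketch of the per-bucket update, assuming the truncated lines implement exactly that weighted average and max; names are illustrative:

    #include <stdint.h>

    /*
     * Fold a new average sample into the stored baseline with weight 7:1,
     * then make sure the bucket never reports a smaller latency than the
     * previous (smaller-I/O-size) bucket.
     */
    static uint64_t update_bucket_latency(uint64_t old_avg, uint64_t new_sample,
                                          uint64_t prev_bucket_latency)
    {
        uint64_t avg = (old_avg * 7 + new_sample) >> 3;  /* EWMA, weight 7:1 */

        if (avg < prev_bucket_latency)
            avg = prev_bucket_latency;                   /* keep buckets monotonic */
        return avg;
    }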
2147 struct throtl_data *td = tg->td; in blk_throtl_bio() local
2157 throtl_update_latency_buckets(td); in blk_throtl_bio()
2180 if (throtl_can_upgrade(td, tg)) { in blk_throtl_bio()
2181 throtl_upgrade_state(td); in blk_throtl_bio()
2225 td->nr_queued[rw]++; in blk_throtl_bio()
2246 if (throttled || !td->track_bio_latency) in blk_throtl_bio()
2253 static void throtl_track_latency(struct throtl_data *td, sector_t size, in throtl_track_latency() argument
2259 if (!td || td->limit_index != LIMIT_LOW || in throtl_track_latency()
2261 !blk_queue_nonrot(td->queue)) in throtl_track_latency()
2266 latency = get_cpu_ptr(td->latency_buckets[op]); in throtl_track_latency()
2269 put_cpu_ptr(td->latency_buckets[op]); in throtl_track_latency()
2275 struct throtl_data *td = q->td; in blk_throtl_stat_add() local
2277 throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10); in blk_throtl_stat_add()
2306 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2309 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2314 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2326 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
2364 struct throtl_data *td = q->td; in blk_throtl_drain() local
2379 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) in blk_throtl_drain()
2383 tg_drain_bios(&td->service_queue); in blk_throtl_drain()
2390 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw], in blk_throtl_drain()
2399 struct throtl_data *td; in blk_throtl_init() local
2402 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); in blk_throtl_init()
2403 if (!td) in blk_throtl_init()
2405 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2407 if (!td->latency_buckets[READ]) { in blk_throtl_init()
2408 kfree(td); in blk_throtl_init()
2411 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2413 if (!td->latency_buckets[WRITE]) { in blk_throtl_init()
2414 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2415 kfree(td); in blk_throtl_init()
2419 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); in blk_throtl_init()
2420 throtl_service_queue_init(&td->service_queue); in blk_throtl_init()
2422 q->td = td; in blk_throtl_init()
2423 td->queue = q; in blk_throtl_init()
2425 td->limit_valid[LIMIT_MAX] = true; in blk_throtl_init()
2426 td->limit_index = LIMIT_MAX; in blk_throtl_init()
2427 td->low_upgrade_time = jiffies; in blk_throtl_init()
2428 td->low_downgrade_time = jiffies; in blk_throtl_init()
2433 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2434 free_percpu(td->latency_buckets[WRITE]); in blk_throtl_init()
2435 kfree(td); in blk_throtl_init()
2442 BUG_ON(!q->td); in blk_throtl_exit()
2445 free_percpu(q->td->latency_buckets[READ]); in blk_throtl_exit()
2446 free_percpu(q->td->latency_buckets[WRITE]); in blk_throtl_exit()
2447 kfree(q->td); in blk_throtl_exit()
2452 struct throtl_data *td; in blk_throtl_register_queue() local
2455 td = q->td; in blk_throtl_register_queue()
2456 BUG_ON(!td); in blk_throtl_register_queue()
2459 td->throtl_slice = DFL_THROTL_SLICE_SSD; in blk_throtl_register_queue()
2460 td->filtered_latency = LATENCY_FILTERED_SSD; in blk_throtl_register_queue()
2462 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register_queue()
2463 td->filtered_latency = LATENCY_FILTERED_HD; in blk_throtl_register_queue()
2465 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
2466 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
2471 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register_queue()
2474 td->track_bio_latency = !queue_is_rq_based(q); in blk_throtl_register_queue()
2475 if (!td->track_bio_latency) in blk_throtl_register_queue()
2482 if (!q->td) in blk_throtl_sample_time_show()
2484 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice)); in blk_throtl_sample_time_show()
2493 if (!q->td) in blk_throtl_sample_time_store()
2500 q->td->throtl_slice = t; in blk_throtl_sample_time_store()