Lines matching refs:WRITE (all hits are in block/blk-throttle.c, the block-layer throttling controller)
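Every match below relies on one indexing convention: struct throtl_grp keeps its limits in per-direction, per-tier arrays, subscripted [rw][tier] with rw being READ or WRITE (kernel-wide constants 0 and 1) and the tier being LIMIT_LOW or LIMIT_MAX (defined in blk-throttle.c). A minimal userspace sketch of that layout; the struct name here is illustrative, not the kernel's:

#include <stdint.h>

enum { READ = 0, WRITE = 1 };              /* I/O direction index */
enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };  /* throttle tiers */

/* Illustrative stand-in for the limit arrays of struct throtl_grp. */
struct tg_limits_sketch {
	uint64_t     bps[2][LIMIT_CNT];    /* bytes/s:  [READ|WRITE][tier] */
	unsigned int iops[2][LIMIT_CNT];   /* I/Os/s:   [READ|WRITE][tier] */
};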
506 for (rw = READ; rw <= WRITE; rw++) { in throtl_pd_alloc()
513 tg->bps[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
515 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
517 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
519 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
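The throtl_pd_alloc() hits (506-519) show a freshly allocated group starting fully unthrottled: every LIMIT_MAX entry is set to the "no limit" sentinel (U64_MAX for bytes, UINT_MAX for iops), in both the effective (bps/iops) and the configured (bps_conf/iops_conf) arrays. Only the WRITE half appears above because the matching READ assignments do not reference WRITE. A hedged sketch of those defaults:

#include <stdint.h>
#include <limits.h>

enum { READ, WRITE };
enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

static void init_unlimited(uint64_t bps[2][LIMIT_CNT],
			   unsigned int iops[2][LIMIT_CNT])
{
	for (int rw = READ; rw <= WRITE; rw++) {
		bps[rw][LIMIT_MAX]  = UINT64_MAX;  /* kernel: U64_MAX */
		iops[rw][LIMIT_MAX] = UINT_MAX;    /* "no iops limit" */
	}
}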
573 for (rw = READ; rw <= WRITE; rw++) in tg_update_has_rules()
600 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
601 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { in blk_throtl_update_limit_valid()
617 tg->bps[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
619 tg->iops[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
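blk_throtl_update_limit_valid() (600-601) decides whether the LIMIT_LOW tier is in use at all: any nonzero low limit, in either direction, for bytes or iops, keeps it valid. throtl_pd_offline() (617-619) zeroes a departing group's low limits so that test can be redone. A sketch of the per-group predicate:

#include <stdbool.h>
#include <stdint.h>

enum { READ, WRITE };
enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

/* True if this group contributes a low limit in any direction. */
static bool tg_has_low_limit(const uint64_t bps[2][LIMIT_CNT],
			     const unsigned int iops[2][LIMIT_CNT])
{
	return bps[READ][LIMIT_LOW]  || bps[WRITE][LIMIT_LOW] ||
	       iops[READ][LIMIT_LOW] || iops[WRITE][LIMIT_LOW];
}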
1110 bio = throtl_peek_queued(&sq->queued[WRITE]); in tg_update_disptime()
1198 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && in throtl_dispatch_tg()
1281 sq->nr_queued[READ] + sq->nr_queued[WRITE], in throtl_pending_timer_fn()
1282 sq->nr_queued[READ], sq->nr_queued[WRITE]); in throtl_pending_timer_fn()
1343 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_dispatch_work_fn()
1400 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1401 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1440 throtl_start_new_slice(tg, WRITE); in tg_conf_updated()
1528 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1540 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1590 tg->bps_conf[WRITE][off] == bps_dft && in tg_prfill_limit()
1592 tg->iops_conf[WRITE][off] == iops_dft && in tg_prfill_limit()
1601 if (tg->bps_conf[WRITE][off] != U64_MAX) in tg_prfill_limit()
1603 tg->bps_conf[WRITE][off]); in tg_prfill_limit()
1607 if (tg->iops_conf[WRITE][off] != UINT_MAX) in tg_prfill_limit()
1609 tg->iops_conf[WRITE][off]); in tg_prfill_limit()
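tg_prfill_limit() (1590-1609) is the cgroup show path: a value still equal to its "unlimited" default is simply omitted, so the io.max/io.low files only list limits the user actually set. A hedged sketch of that filtering for the WRITE columns; the output format here is illustrative, not the kernel's exact one:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <limits.h>

static void show_write_limits(FILE *out, uint64_t wbps, unsigned int wiops)
{
	if (wbps != UINT64_MAX)                   /* kernel default: U64_MAX */
		fprintf(out, " wbps=%" PRIu64, wbps);
	if (wiops != UINT_MAX)
		fprintf(out, " wiops=%u", wiops);
}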
1656 v[1] = tg->bps_conf[WRITE][index]; in tg_set_limit()
1658 v[3] = tg->iops_conf[WRITE][index]; in tg_set_limit()
1702 tg->bps_conf[WRITE][index] = v[1]; in tg_set_limit()
1704 tg->iops_conf[WRITE][index] = v[3]; in tg_set_limit()
1708 tg->bps[WRITE][index] = v[1]; in tg_set_limit()
1710 tg->iops[WRITE][index] = v[3]; in tg_set_limit()
1714 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1715 tg->bps_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1718 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1719 tg->iops_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1725 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || in tg_set_limit()
1729 tg->bps[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1731 tg->iops[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
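tg_set_limit() (1656-1731) parses new values into the *_conf arrays and then derives the effective limits. The hits at 1714-1719 show the key invariant: the effective low limit is clamped so it can never exceed the configured max limit for the same direction; and if the low configuration is incomplete, the effective low limits are zeroed again (1725-1731). Sketch of the clamp:

#include <stdint.h>

enum { READ, WRITE };
enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

static void apply_low_clamp(uint64_t bps[2][LIMIT_CNT],
			    const uint64_t bps_conf[2][LIMIT_CNT])
{
	for (int rw = READ; rw <= WRITE; rw++) {
		uint64_t low = bps_conf[rw][LIMIT_LOW];
		uint64_t max = bps_conf[rw][LIMIT_MAX];

		bps[rw][LIMIT_LOW] = low < max ? low : max;  /* min() */
	}
}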
1797 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in __tg_last_low_overflow_time()
1798 wtime = tg->last_low_overflow_time[WRITE]; in __tg_last_low_overflow_time()
1821 !parent->bps[WRITE][LIMIT_LOW] && in tg_last_low_overflow_time()
1822 !parent->iops[WRITE][LIMIT_LOW]) in tg_last_low_overflow_time()
1866 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]; in throtl_tg_can_upgrade()
1870 (!write_limit || sq->nr_queued[WRITE])) in throtl_tg_can_upgrade()
1872 if (write_limit && sq->nr_queued[WRITE] && in throtl_tg_can_upgrade()
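throtl_tg_can_upgrade() (1866-1872) gates leaving LIMIT_LOW mode: a direction counts only if it has a low limit configured, and upgrading requires the limited direction(s) to actually have bios queued, i.e. the group is genuinely being held back. A simplified boolean sketch of the decision (the kernel's additional time-based fallback is omitted):

#include <stdbool.h>

static bool can_upgrade(bool read_limit, bool write_limit,
			unsigned int nr_queued_read,
			unsigned int nr_queued_write)
{
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && nr_queued_read &&
	    (!write_limit || nr_queued_write))
		return true;
	if (write_limit && nr_queued_write &&
	    (!read_limit || nr_queued_read))
		return true;
	return false;
}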
2041 if (tg->bps[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2042 bps = tg->last_bytes_disp[WRITE] * HZ; in throtl_downgrade_check()
2044 if (bps >= tg->bps[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2045 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2054 if (tg->iops[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2055 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; in throtl_downgrade_check()
2056 if (iops >= tg->iops[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2057 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2068 tg->last_bytes_disp[WRITE] = 0; in throtl_downgrade_check()
2070 tg->last_io_disp[WRITE] = 0; in throtl_downgrade_check()
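throtl_downgrade_check() (2041-2070) measures the rate actually achieved since the last check: bytes (or I/Os) dispatched, scaled by HZ over the elapsed jiffies. If the group still reaches its low limit, last_low_overflow_time is refreshed; afterwards the per-window counters are reset (2068-2070). A sketch of the bps side; the kernel uses do_div() for the 64-bit division, and HZ below is an illustrative stand-in for the kernel tick rate:

#include <stdint.h>

#define HZ 250   /* illustrative; the real value is a kernel config choice */

static void downgrade_check_bps(uint64_t *last_bytes_disp, uint64_t bps_low,
				unsigned long elapsed_jiffies,
				unsigned long now,
				unsigned long *last_low_overflow_time)
{
	if (bps_low && elapsed_jiffies) {
		uint64_t bps = *last_bytes_disp * HZ / elapsed_jiffies;

		if (bps >= bps_low)
			*last_low_overflow_time = now;
	}
	*last_bytes_disp = 0;   /* start a new measurement window */
}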
2105 for (rw = READ; rw <= WRITE; rw++) { in throtl_update_latency_buckets()
2136 for (rw = READ; rw <= WRITE; rw++) { in throtl_update_latency_buckets()
2164 td->avg_buckets[WRITE][i].latency, in throtl_update_latency_buckets()
2165 td->avg_buckets[WRITE][i].valid); in throtl_update_latency_buckets()
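throtl_update_latency_buckets() (2105-2165) keeps separate latency buckets per direction and folds fresh samples into a running average; the kernel smooths with a 7/8 old, 1/8 new weighting. A minimal sketch of that update step:

/* Exponential moving average as used for avg_buckets[rw][i].latency:
 * keep 7/8 of the old estimate, mix in 1/8 of the new sample. */
static unsigned long ewma_latency(unsigned long avg, unsigned long sample)
{
	return (avg * 7 + sample) >> 3;
}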
2261 sq->nr_queued[READ], sq->nr_queued[WRITE]); in blk_throtl_bio()
2391 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2393 if (!td->latency_buckets[WRITE]) { in blk_throtl_init()
2414 free_percpu(td->latency_buckets[WRITE]); in blk_throtl_init()
2426 free_percpu(q->td->latency_buckets[WRITE]); in blk_throtl_exit()
2446 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
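The tail hits cover setup and teardown: blk_throtl_init() (2391-2393) allocates a per-cpu latency bucket array per direction (the READ twin sits just above the WRITE match), blk_throtl_exit() frees it, and blk_throtl_register_queue() (2446) seeds every average bucket with a baseline latency for rotational disks. A sketch of that seeding, with the bucket count and baseline treated as opaque constants whose real values live in blk-throttle.c:

enum { READ, WRITE };
#define LATENCY_BUCKET_SIZE 9            /* assumed; kernel-defined count */
#define DFL_HD_BASELINE_LATENCY 4000     /* assumed; kernel-defined baseline */

struct avg_latency_bucket_sketch {
	unsigned long latency;
	int valid;
};

static void seed_hd_baseline(
	struct avg_latency_bucket_sketch avg[2][LATENCY_BUCKET_SIZE])
{
	for (int i = 0; i < LATENCY_BUCKET_SIZE; i++) {
		avg[READ][i].latency  = DFL_HD_BASELINE_LATENCY;
		avg[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
	}
}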