Lines Matching refs:WRITE

333 INIT_LIST_HEAD(&sq->queued[WRITE]); in throtl_service_queue_init()
356 for (rw = READ; rw <= WRITE; rw++) { in throtl_pd_alloc()
363 tg->bps[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
365 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
367 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
369 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
424 for (rw = READ; rw <= WRITE; rw++) { in tg_update_has_rules()
457 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
458 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { in blk_throtl_update_limit_valid()
479 tg->bps[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
481 tg->iops[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
822 if (tg->service_queue.nr_queued[WRITE]) in tg_update_carryover()
823 __tg_update_carryover(tg, WRITE); in tg_update_carryover()
827 tg->carryover_bytes[READ], tg->carryover_bytes[WRITE], in tg_update_carryover()
828 tg->carryover_ios[READ], tg->carryover_ios[WRITE]); in tg_update_carryover()
1019 bio = throtl_peek_queued(&sq->queued[WRITE]); in tg_update_disptime()
1108 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && in throtl_dispatch_tg()
1142 if (sq->nr_queued[READ] || sq->nr_queued[WRITE]) in throtl_select_dispatch()
1201 sq->nr_queued[READ] + sq->nr_queued[WRITE], in throtl_pending_timer_fn()
1202 sq->nr_queued[READ], sq->nr_queued[WRITE]); in throtl_pending_timer_fn()
1263 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_dispatch_work_fn()
1320 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1321 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1360 throtl_start_new_slice(tg, WRITE, false); in tg_conf_updated()
1451 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1463 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1513 tg->bps_conf[WRITE][off] == bps_dft && in tg_prfill_limit()
1515 tg->iops_conf[WRITE][off] == iops_dft && in tg_prfill_limit()
1524 if (tg->bps_conf[WRITE][off] != U64_MAX) in tg_prfill_limit()
1526 tg->bps_conf[WRITE][off]); in tg_prfill_limit()
1530 if (tg->iops_conf[WRITE][off] != UINT_MAX) in tg_prfill_limit()
1532 tg->iops_conf[WRITE][off]); in tg_prfill_limit()
1582 v[1] = tg->bps_conf[WRITE][index]; in tg_set_limit()
1584 v[3] = tg->iops_conf[WRITE][index]; in tg_set_limit()
1628 tg->bps_conf[WRITE][index] = v[1]; in tg_set_limit()
1630 tg->iops_conf[WRITE][index] = v[3]; in tg_set_limit()
1634 tg->bps[WRITE][index] = v[1]; in tg_set_limit()
1636 tg->iops[WRITE][index] = v[3]; in tg_set_limit()
1640 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1641 tg->bps_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1644 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1645 tg->iops_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1651 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || in tg_set_limit()
1655 tg->bps[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1657 tg->iops[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1769 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in __tg_last_low_overflow_time()
1770 wtime = tg->last_low_overflow_time[WRITE]; in __tg_last_low_overflow_time()
1792 !parent->bps[WRITE][LIMIT_LOW] && in tg_last_low_overflow_time()
1793 !parent->iops[WRITE][LIMIT_LOW]) in tg_last_low_overflow_time()
1849 throtl_low_limit_reached(tg, WRITE)) in throtl_tg_can_upgrade()
2021 if (tg->bps[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2022 bps = tg->last_bytes_disp[WRITE] * HZ; in throtl_downgrade_check()
2024 if (bps >= tg->bps[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2025 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2034 if (tg->iops[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2035 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; in throtl_downgrade_check()
2036 if (iops >= tg->iops[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2037 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2048 tg->last_bytes_disp[WRITE] = 0; in throtl_downgrade_check()
2050 tg->last_io_disp[WRITE] = 0; in throtl_downgrade_check()
2084 for (rw = READ; rw <= WRITE; rw++) { in throtl_update_latency_buckets()
2115 for (rw = READ; rw <= WRITE; rw++) { in throtl_update_latency_buckets()
2143 td->avg_buckets[WRITE][i].latency, in throtl_update_latency_buckets()
2144 td->avg_buckets[WRITE][i].valid); in throtl_update_latency_buckets()
2251 sq->nr_queued[READ], sq->nr_queued[WRITE]); in __blk_throtl_bio()
2381 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2383 if (!td->latency_buckets[WRITE]) { in blk_throtl_init()
2404 free_percpu(td->latency_buckets[WRITE]); in blk_throtl_init()
2419 free_percpu(q->td->latency_buckets[WRITE]); in blk_throtl_exit()
2440 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register()
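
The hits above all share one indexing convention: every limit array is indexed first by direction (READ/WRITE) and then by limit level (LIMIT_LOW/LIMIT_MAX), and loops walk `for (rw = READ; rw <= WRITE; rw++)`. The standalone sketch below is a simplified illustration of that pattern, not kernel code: the struct, field sizes, and helper names are stand-ins for the real struct throtl_grp fields, mirroring the defaults seen in throtl_pd_alloc() and the LIMIT_LOW clamping seen in tg_set_limit().

```c
/* Minimal user-space sketch of the [rw][limit] indexing used in blk-throttle.
 * READ/WRITE and LIMIT_LOW/LIMIT_MAX mirror the kernel constants; the struct
 * is a simplified stand-in for struct throtl_grp, not the real definition. */
#include <stdint.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };
enum { LIMIT_LOW = 0, LIMIT_MAX = 1, LIMIT_CNT = 2 };

struct tg_sketch {
	uint64_t bps[2][LIMIT_CNT];      /* effective bytes-per-second limits */
	uint64_t bps_conf[2][LIMIT_CNT]; /* user-configured values */
};

static void tg_sketch_init(struct tg_sketch *tg)
{
	int rw;

	/* As in throtl_pd_alloc(): LIMIT_MAX defaults to "unlimited". */
	for (rw = READ; rw <= WRITE; rw++) {
		tg->bps[rw][LIMIT_MAX] = UINT64_MAX;
		tg->bps_conf[rw][LIMIT_MAX] = UINT64_MAX;
		tg->bps[rw][LIMIT_LOW] = 0;
		tg->bps_conf[rw][LIMIT_LOW] = 0;
	}
}

static void tg_sketch_apply(struct tg_sketch *tg)
{
	int rw;

	/* As in the clamping in tg_set_limit(): the effective low limit can
	 * never exceed the configured max limit for the same direction. */
	for (rw = READ; rw <= WRITE; rw++)
		tg->bps[rw][LIMIT_LOW] = tg->bps_conf[rw][LIMIT_LOW] <
					 tg->bps_conf[rw][LIMIT_MAX] ?
					 tg->bps_conf[rw][LIMIT_LOW] :
					 tg->bps_conf[rw][LIMIT_MAX];
}

int main(void)
{
	struct tg_sketch tg;

	tg_sketch_init(&tg);
	tg.bps_conf[WRITE][LIMIT_LOW] = 10 << 20; /* 10 MiB/s low target */
	tg.bps_conf[WRITE][LIMIT_MAX] = 5 << 20;  /* 5 MiB/s hard cap */
	tg_sketch_apply(&tg);

	/* The low limit is clamped to the configured max: prints 5242880. */
	printf("%llu\n", (unsigned long long)tg.bps[WRITE][LIMIT_LOW]);
	return 0;
}
```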