Lines Matching refs:ioc

402 struct ioc { struct
460 struct ioc *ioc; argument
658 static struct ioc *rqos_to_ioc(struct rq_qos *rqos) in rqos_to_ioc()
660 return container_of(rqos, struct ioc, rqos); in rqos_to_ioc()
663 static struct ioc *q_to_ioc(struct request_queue *q) in q_to_ioc()
676 static const char __maybe_unused *ioc_name(struct ioc *ioc) in ioc_name() argument
678 return q_name(ioc->rqos.q); in ioc_name()
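The rqos_to_ioc()/q_to_ioc()/ioc_name() helpers above recover the enclosing struct ioc (the per-device state of the blk-iocost controller) from its embedded rq_qos member via container_of. A minimal userspace sketch of the same pattern, using simplified stand-in types (struct rq_qos and struct ioc here are illustrative placeholders, not the kernel definitions):

    #include <stddef.h>
    #include <stdio.h>

    /* userspace stand-in for the kernel's container_of() */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rq_qos { int id; };                          /* placeholder */
    struct ioc    { int period_us; struct rq_qos rqos; };

    static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
    {
            return container_of(rqos, struct ioc, rqos);
    }

    int main(void)
    {
            struct ioc ioc = { .period_us = 5000 };

            /* recovers the enclosing ioc from a pointer to its member */
            printf("%d\n", rqos_to_ioc(&ioc.rqos)->period_us);  /* 5000 */
            return 0;
    }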
735 spin_lock_irqsave(&iocg->ioc->lock, *flags); in iocg_lock()
746 spin_unlock_irqrestore(&iocg->ioc->lock, *flags); in iocg_unlock()
755 static void ioc_refresh_margins(struct ioc *ioc) in ioc_refresh_margins() argument
757 struct ioc_margins *margins = &ioc->margins; in ioc_refresh_margins()
758 u32 period_us = ioc->period_us; in ioc_refresh_margins()
759 u64 vrate = ioc->vtime_base_rate; in ioc_refresh_margins()
767 static void ioc_refresh_period_us(struct ioc *ioc) in ioc_refresh_period_us() argument
771 lockdep_assert_held(&ioc->lock); in ioc_refresh_period_us()
774 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) { in ioc_refresh_period_us()
775 ppm = ioc->params.qos[QOS_RPPM]; in ioc_refresh_period_us()
776 lat = ioc->params.qos[QOS_RLAT]; in ioc_refresh_period_us()
778 ppm = ioc->params.qos[QOS_WPPM]; in ioc_refresh_period_us()
779 lat = ioc->params.qos[QOS_WLAT]; in ioc_refresh_period_us()
798 ioc->period_us = period_us; in ioc_refresh_period_us()
799 ioc->timer_slack_ns = div64_u64( in ioc_refresh_period_us()
802 ioc_refresh_margins(ioc); in ioc_refresh_period_us()
805 static int ioc_autop_idx(struct ioc *ioc) in ioc_autop_idx() argument
807 int idx = ioc->autop_idx; in ioc_autop_idx()
813 if (!blk_queue_nonrot(ioc->rqos.q)) in ioc_autop_idx()
817 if (blk_queue_depth(ioc->rqos.q) == 1) in ioc_autop_idx()
825 if (ioc->user_qos_params || ioc->user_cost_model) in ioc_autop_idx()
829 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC); in ioc_autop_idx()
833 if (!ioc->autop_too_fast_at) in ioc_autop_idx()
834 ioc->autop_too_fast_at = now_ns; in ioc_autop_idx()
835 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
838 ioc->autop_too_fast_at = 0; in ioc_autop_idx()
842 if (!ioc->autop_too_slow_at) in ioc_autop_idx()
843 ioc->autop_too_slow_at = now_ns; in ioc_autop_idx()
844 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
847 ioc->autop_too_slow_at = 0; in ioc_autop_idx()
890 static void ioc_refresh_lcoefs(struct ioc *ioc) in ioc_refresh_lcoefs() argument
892 u64 *u = ioc->params.i_lcoefs; in ioc_refresh_lcoefs()
893 u64 *c = ioc->params.lcoefs; in ioc_refresh_lcoefs()
901 static bool ioc_refresh_params(struct ioc *ioc, bool force) in ioc_refresh_params() argument
906 lockdep_assert_held(&ioc->lock); in ioc_refresh_params()
908 idx = ioc_autop_idx(ioc); in ioc_refresh_params()
911 if (idx == ioc->autop_idx && !force) in ioc_refresh_params()
914 if (idx != ioc->autop_idx) in ioc_refresh_params()
915 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in ioc_refresh_params()
917 ioc->autop_idx = idx; in ioc_refresh_params()
918 ioc->autop_too_fast_at = 0; in ioc_refresh_params()
919 ioc->autop_too_slow_at = 0; in ioc_refresh_params()
921 if (!ioc->user_qos_params) in ioc_refresh_params()
922 memcpy(ioc->params.qos, p->qos, sizeof(p->qos)); in ioc_refresh_params()
923 if (!ioc->user_cost_model) in ioc_refresh_params()
924 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs)); in ioc_refresh_params()
926 ioc_refresh_period_us(ioc); in ioc_refresh_params()
927 ioc_refresh_lcoefs(ioc); in ioc_refresh_params()
929 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] * in ioc_refresh_params()
931 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] * in ioc_refresh_params()
944 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now) in ioc_refresh_vrate() argument
946 s64 pleft = ioc->period_at + ioc->period_us - now->now; in ioc_refresh_vrate()
947 s64 vperiod = ioc->period_us * ioc->vtime_base_rate; in ioc_refresh_vrate()
950 lockdep_assert_held(&ioc->lock); in ioc_refresh_vrate()
961 vcomp = -div64_s64(ioc->vtime_err, pleft); in ioc_refresh_vrate()
962 vcomp_min = -(ioc->vtime_base_rate >> 1); in ioc_refresh_vrate()
963 vcomp_max = ioc->vtime_base_rate; in ioc_refresh_vrate()
966 ioc->vtime_err += vcomp * pleft; in ioc_refresh_vrate()
968 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp); in ioc_refresh_vrate()
971 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod); in ioc_refresh_vrate()
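ioc_refresh_vrate() above spreads the accumulated vtime error over the time left in the current period: vcomp = -vtime_err / pleft, clamped to [-vtime_base_rate/2, vtime_base_rate]; the compensated amount is folded back into vtime_err and the effective rate becomes vtime_base_rate + vcomp. A small userspace sketch of that arithmetic (the numbers are invented; locking and the final clamp of vtime_err to one period are elided):

    #include <stdint.h>
    #include <stdio.h>

    static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            /* invented numbers purely to show the shape of the compensation */
            int64_t vtime_base_rate = 1000;    /* vtime granted per usec */
            int64_t vtime_err       = -50000;  /* device vtime behind by 50000 */
            int64_t pleft           = 200;     /* usecs left in the period */

            /* spread the error over the remaining period, bounded as above */
            int64_t vcomp = clamp_s64(-vtime_err / pleft,
                                      -(vtime_base_rate >> 1), vtime_base_rate);
            vtime_err += vcomp * pleft;        /* account for what gets paid back */
            int64_t vtime_rate = vtime_base_rate + vcomp;

            printf("vcomp=%lld rate=%lld err=%lld\n",
                   (long long)vcomp, (long long)vtime_rate, (long long)vtime_err);
            return 0;
    }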
974 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct, in ioc_adjust_base_vrate() argument
978 u64 vrate = ioc->vtime_base_rate; in ioc_adjust_base_vrate()
979 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max; in ioc_adjust_base_vrate()
981 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) { in ioc_adjust_base_vrate()
982 if (ioc->busy_level != prev_busy_level || nr_lagging) in ioc_adjust_base_vrate()
983 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), in ioc_adjust_base_vrate()
1002 int idx = min_t(int, abs(ioc->busy_level), in ioc_adjust_base_vrate()
1006 if (ioc->busy_level > 0) in ioc_adjust_base_vrate()
1015 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, in ioc_adjust_base_vrate()
1018 ioc->vtime_base_rate = vrate; in ioc_adjust_base_vrate()
1019 ioc_refresh_margins(ioc); in ioc_adjust_base_vrate()
1023 static void ioc_now(struct ioc *ioc, struct ioc_now *now) in ioc_now() argument
1029 now->vrate = atomic64_read(&ioc->vtime_rate); in ioc_now()
1040 seq = read_seqcount_begin(&ioc->period_seqcount); in ioc_now()
1041 now->vnow = ioc->period_at_vtime + in ioc_now()
1042 (now->now - ioc->period_at) * now->vrate; in ioc_now()
1043 } while (read_seqcount_retry(&ioc->period_seqcount, seq)); in ioc_now()
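ioc_now() above extrapolates the current device vtime from the last period boundary under a seqcount read loop: vnow = period_at_vtime + (now - period_at) * vrate. The same arithmetic as a tiny standalone sketch, with made-up values and the seqcount retry elided:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* illustrative values only, not kernel defaults */
            uint64_t period_at       = 1000000;   /* period start, usecs */
            uint64_t period_at_vtime = 5000000;   /* device vtime at period start */
            uint64_t vrate           = 4;         /* vtime per usec */
            uint64_t now             = 1000250;   /* current time, usecs */

            /* vnow advances at vrate per elapsed usec since the period start */
            uint64_t vnow = period_at_vtime + (now - period_at) * vrate;

            printf("vnow = %llu\n", (unsigned long long)vnow);  /* 5001000 */
            return 0;
    }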
1046 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) in ioc_start_period() argument
1048 WARN_ON_ONCE(ioc->running != IOC_RUNNING); in ioc_start_period()
1050 write_seqcount_begin(&ioc->period_seqcount); in ioc_start_period()
1051 ioc->period_at = now->now; in ioc_start_period()
1052 ioc->period_at_vtime = now->vnow; in ioc_start_period()
1053 write_seqcount_end(&ioc->period_seqcount); in ioc_start_period()
1055 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us); in ioc_start_period()
1056 add_timer(&ioc->timer); in ioc_start_period()
1067 struct ioc *ioc = iocg->ioc; in __propagate_weights() local
1070 lockdep_assert_held(&ioc->lock); in __propagate_weights()
1124 ioc->weights_updated = true; in __propagate_weights()
1127 static void commit_weights(struct ioc *ioc) in commit_weights() argument
1129 lockdep_assert_held(&ioc->lock); in commit_weights()
1131 if (ioc->weights_updated) { in commit_weights()
1134 atomic_inc(&ioc->hweight_gen); in commit_weights()
1135 ioc->weights_updated = false; in commit_weights()
1143 commit_weights(iocg->ioc); in propagate_weights()
1148 struct ioc *ioc = iocg->ioc; in current_hweight() local
1154 ioc_gen = atomic_read(&ioc->hweight_gen); in current_hweight()
1211 lockdep_assert_held(&iocg->ioc->lock); in current_hweight_max()
1228 struct ioc *ioc = iocg->ioc; in weight_updated() local
1233 lockdep_assert_held(&ioc->lock); in weight_updated()
1243 struct ioc *ioc = iocg->ioc; in iocg_activate() local
1253 ioc_now(ioc, now); in iocg_activate()
1254 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1264 spin_lock_irq(&ioc->lock); in iocg_activate()
1266 ioc_now(ioc, now); in iocg_activate()
1269 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1287 vtarget = now->vnow - ioc->margins.target; in iocg_activate()
1299 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; in iocg_activate()
1300 list_add(&iocg->active_list, &ioc->active_iocgs); in iocg_activate()
1310 if (ioc->running == IOC_IDLE) { in iocg_activate()
1311 ioc->running = IOC_RUNNING; in iocg_activate()
1312 ioc->dfgv_period_at = now->now; in iocg_activate()
1313 ioc->dfgv_period_rem = 0; in iocg_activate()
1314 ioc_start_period(ioc, now); in iocg_activate()
1318 spin_unlock_irq(&ioc->lock); in iocg_activate()
1322 spin_unlock_irq(&ioc->lock); in iocg_activate()
1328 struct ioc *ioc = iocg->ioc; in iocg_kick_delay() local
1348 ioc->period_us * ioc->vtime_base_rate); in iocg_kick_delay()
1388 lockdep_assert_held(&iocg->ioc->lock); in iocg_incur_debt()
1411 lockdep_assert_held(&iocg->ioc->lock); in iocg_pay_debt()
1465 struct ioc *ioc = iocg->ioc; in iocg_kick_waitq() local
1482 lockdep_assert_held(&ioc->lock); in iocg_kick_waitq()
1531 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) * in iocg_kick_waitq()
1533 expires += ioc->timer_slack_ns; in iocg_kick_waitq()
1538 abs(oexpires - expires) <= ioc->timer_slack_ns) in iocg_kick_waitq()
1542 ioc->timer_slack_ns, HRTIMER_MODE_ABS); in iocg_kick_waitq()
1552 ioc_now(iocg->ioc, &now); in iocg_waitq_timer_fn()
1561 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p) in ioc_lat_stat() argument
1569 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu); in ioc_lat_stat()
1597 ioc->period_us * NSEC_PER_USEC); in ioc_lat_stat()
1603 struct ioc *ioc = iocg->ioc; in iocg_is_idle() local
1607 atomic64_read(&ioc->cur_period)) in iocg_is_idle()
1647 struct ioc *ioc = iocg->ioc; in iocg_flush_stat_one() local
1653 lockdep_assert_held(&iocg->ioc->lock); in iocg_flush_stat_one()
1663 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); in iocg_flush_stat_one()
1721 struct ioc *ioc = iocg->ioc; in hweight_after_donation() local
1731 time_after64(vtime, now->vnow - ioc->margins.min)) in hweight_after_donation()
1735 excess = now->vnow - vtime - ioc->margins.target; in hweight_after_donation()
1740 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE); in hweight_after_donation()
1760 now->vnow - ioc->period_at_vtime); in hweight_after_donation()
2050 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, in ioc_forgive_debts() argument
2058 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2059 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2060 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2070 if (ioc->busy_level > 0) in ioc_forgive_debts()
2071 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us); in ioc_forgive_debts()
2073 ioc->dfgv_usage_us_sum += usage_us_sum; in ioc_forgive_debts()
2074 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD)) in ioc_forgive_debts()
2081 dur = now->now - ioc->dfgv_period_at; in ioc_forgive_debts()
2082 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur); in ioc_forgive_debts()
2084 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2085 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2089 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2102 nr_cycles = dur + ioc->dfgv_period_rem; in ioc_forgive_debts()
2103 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD); in ioc_forgive_debts()
2105 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_forgive_debts()
2141 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now) in ioc_check_iocgs() argument
2146 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { in ioc_check_iocgs()
2186 excess = now->vnow - vtime - ioc->margins.target; in ioc_check_iocgs()
2191 ioc->vtime_err -= div64_u64(excess * old_hwi, in ioc_check_iocgs()
2197 atomic64_read(&ioc->cur_period), vtime); in ioc_check_iocgs()
2205 commit_weights(ioc); in ioc_check_iocgs()
2211 struct ioc *ioc = container_of(timer, struct ioc, timer); in ioc_timer_fn() local
2217 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; in ioc_timer_fn()
2218 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; in ioc_timer_fn()
2224 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct); in ioc_timer_fn()
2227 spin_lock_irq(&ioc->lock); in ioc_timer_fn()
2229 ioc_now(ioc, &now); in ioc_timer_fn()
2231 period_vtime = now.vnow - ioc->period_at_vtime; in ioc_timer_fn()
2233 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
2237 nr_debtors = ioc_check_iocgs(ioc, &now); in ioc_timer_fn()
2243 iocg_flush_stat(&ioc->active_iocgs, &now); in ioc_timer_fn()
2246 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_timer_fn()
2283 time_before64(vtime, now.vnow - ioc->margins.low))) { in ioc_timer_fn()
2290 ioc->vtime_base_rate); in ioc_timer_fn()
2296 if (time_after64(iocg->activated_at, ioc->period_at)) in ioc_timer_fn()
2299 usage_dur = max_t(u64, now.now - ioc->period_at, 1); in ioc_timer_fn()
2336 commit_weights(ioc); in ioc_timer_fn()
2348 prev_busy_level = ioc->busy_level; in ioc_timer_fn()
2353 ioc->busy_level = max(ioc->busy_level, 0); in ioc_timer_fn()
2354 ioc->busy_level++; in ioc_timer_fn()
2364 ioc->busy_level = min(ioc->busy_level, 0); in ioc_timer_fn()
2371 ioc->busy_level--; in ioc_timer_fn()
2379 ioc->busy_level = 0; in ioc_timer_fn()
2383 ioc->busy_level = 0; in ioc_timer_fn()
2386 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000); in ioc_timer_fn()
2388 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages, in ioc_timer_fn()
2391 ioc_refresh_params(ioc, false); in ioc_timer_fn()
2393 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now); in ioc_timer_fn()
2399 atomic64_inc(&ioc->cur_period); in ioc_timer_fn()
2401 if (ioc->running != IOC_STOP) { in ioc_timer_fn()
2402 if (!list_empty(&ioc->active_iocgs)) { in ioc_timer_fn()
2403 ioc_start_period(ioc, &now); in ioc_timer_fn()
2405 ioc->busy_level = 0; in ioc_timer_fn()
2406 ioc->vtime_err = 0; in ioc_timer_fn()
2407 ioc->running = IOC_IDLE; in ioc_timer_fn()
2410 ioc_refresh_vrate(ioc, &now); in ioc_timer_fn()
2413 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
2419 struct ioc *ioc = iocg->ioc; in adjust_inuse_and_calc_cost() local
2420 struct ioc_margins *margins = &ioc->margins; in adjust_inuse_and_calc_cost()
2443 spin_lock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2447 spin_unlock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2468 spin_unlock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2479 struct ioc *ioc = iocg->ioc; in calc_vtime_cost_builtin() local
2487 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO]; in calc_vtime_cost_builtin()
2488 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO]; in calc_vtime_cost_builtin()
2489 coef_page = ioc->params.lcoefs[LCOEF_RPAGE]; in calc_vtime_cost_builtin()
2492 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO]; in calc_vtime_cost_builtin()
2493 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO]; in calc_vtime_cost_builtin()
2494 coef_page = ioc->params.lcoefs[LCOEF_WPAGE]; in calc_vtime_cost_builtin()
2525 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc, in calc_size_vtime_cost_builtin() argument
2532 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE]; in calc_size_vtime_cost_builtin()
2535 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE]; in calc_size_vtime_cost_builtin()
2542 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc) in calc_size_vtime_cost() argument
2546 calc_size_vtime_cost_builtin(rq, ioc, &cost); in calc_size_vtime_cost()
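calc_size_vtime_cost_builtin() above charges a request purely by its size: the page count times the per-page linear coefficient for the I/O direction (LCOEF_RPAGE or LCOEF_WPAGE). A minimal sketch of that cost model, with placeholder coefficients standing in for the measured values kept in ioc->params.lcoefs:

    #include <stdint.h>
    #include <stdio.h>

    /* placeholder coefficients; the real ones live in ioc->params.lcoefs */
    #define LCOEF_RPAGE 100
    #define LCOEF_WPAGE 150

    /* size-only cost: pages times the per-page coefficient for the direction */
    static uint64_t size_vtime_cost(uint64_t pages, int is_write)
    {
            return pages * (is_write ? LCOEF_WPAGE : LCOEF_RPAGE);
    }

    int main(void)
    {
            /* a hypothetical 64KiB write = 16 pages */
            printf("%llu\n", (unsigned long long)size_vtime_cost(16, 1)); /* 2400 */
            return 0;
    }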
2553 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_throttle() local
2562 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_throttle()
2689 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_merge() local
2696 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_merge()
2703 ioc_now(ioc, &now); in ioc_rqos_merge()
2728 spin_lock_irqsave(&ioc->lock, flags); in ioc_rqos_merge()
2741 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_rqos_merge()
2754 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_done() local
2759 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns) in ioc_rqos_done()
2777 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC); in ioc_rqos_done()
2779 ccs = get_cpu_ptr(ioc->pcpu_stat); in ioc_rqos_done()
2782 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC) in ioc_rqos_done()
2794 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_queue_depth_changed() local
2796 spin_lock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2797 ioc_refresh_params(ioc, false); in ioc_rqos_queue_depth_changed()
2798 spin_unlock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2803 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_exit() local
2807 spin_lock_irq(&ioc->lock); in ioc_rqos_exit()
2808 ioc->running = IOC_STOP; in ioc_rqos_exit()
2809 spin_unlock_irq(&ioc->lock); in ioc_rqos_exit()
2811 del_timer_sync(&ioc->timer); in ioc_rqos_exit()
2812 free_percpu(ioc->pcpu_stat); in ioc_rqos_exit()
2813 kfree(ioc); in ioc_rqos_exit()
2827 struct ioc *ioc; in blk_iocost_init() local
2831 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); in blk_iocost_init()
2832 if (!ioc) in blk_iocost_init()
2835 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat); in blk_iocost_init()
2836 if (!ioc->pcpu_stat) { in blk_iocost_init()
2837 kfree(ioc); in blk_iocost_init()
2842 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); in blk_iocost_init()
2851 rqos = &ioc->rqos; in blk_iocost_init()
2856 spin_lock_init(&ioc->lock); in blk_iocost_init()
2857 timer_setup(&ioc->timer, ioc_timer_fn, 0); in blk_iocost_init()
2858 INIT_LIST_HEAD(&ioc->active_iocgs); in blk_iocost_init()
2860 ioc->running = IOC_IDLE; in blk_iocost_init()
2861 ioc->vtime_base_rate = VTIME_PER_USEC; in blk_iocost_init()
2862 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in blk_iocost_init()
2863 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock); in blk_iocost_init()
2864 ioc->period_at = ktime_to_us(ktime_get()); in blk_iocost_init()
2865 atomic64_set(&ioc->cur_period, 0); in blk_iocost_init()
2866 atomic_set(&ioc->hweight_gen, 0); in blk_iocost_init()
2868 spin_lock_irq(&ioc->lock); in blk_iocost_init()
2869 ioc->autop_idx = AUTOP_INVALID; in blk_iocost_init()
2870 ioc_refresh_params(ioc, true); in blk_iocost_init()
2871 spin_unlock_irq(&ioc->lock); in blk_iocost_init()
2883 free_percpu(ioc->pcpu_stat); in blk_iocost_init()
2884 kfree(ioc); in blk_iocost_init()
2930 struct ioc *ioc = q_to_ioc(blkg->q); in ioc_pd_init() local
2935 ioc_now(ioc, &now); in ioc_pd_init()
2937 iocg->ioc = ioc; in ioc_pd_init()
2940 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); in ioc_pd_init()
2958 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_init()
2960 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_init()
2966 struct ioc *ioc = iocg->ioc; in ioc_pd_free() local
2969 if (ioc) { in ioc_pd_free()
2970 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_free()
2975 ioc_now(ioc, &now); in ioc_pd_free()
2983 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_free()
2994 struct ioc *ioc = iocg->ioc; in ioc_pd_stat() local
2996 if (!ioc->enabled) in ioc_pd_stat()
3001 ioc->vtime_base_rate * 10000, in ioc_pd_stat()
3065 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3066 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3068 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3091 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3093 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3095 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3109 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_qos_prfill() local
3115 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto", in ioc_qos_prfill()
3116 ioc->params.qos[QOS_RPPM] / 10000, in ioc_qos_prfill()
3117 ioc->params.qos[QOS_RPPM] % 10000 / 100, in ioc_qos_prfill()
3118 ioc->params.qos[QOS_RLAT], in ioc_qos_prfill()
3119 ioc->params.qos[QOS_WPPM] / 10000, in ioc_qos_prfill()
3120 ioc->params.qos[QOS_WPPM] % 10000 / 100, in ioc_qos_prfill()
3121 ioc->params.qos[QOS_WLAT], in ioc_qos_prfill()
3122 ioc->params.qos[QOS_MIN] / 10000, in ioc_qos_prfill()
3123 ioc->params.qos[QOS_MIN] % 10000 / 100, in ioc_qos_prfill()
3124 ioc->params.qos[QOS_MAX] / 10000, in ioc_qos_prfill()
3125 ioc->params.qos[QOS_MAX] % 10000 / 100); in ioc_qos_prfill()
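ioc_qos_prfill() above prints the ppm-scaled QoS parameters as percentages with two decimal places: value / 10000 yields the whole percent and value % 10000 / 100 the hundredths. A one-line sketch of the conversion (992500 is just an example input):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ppm = 992500;   /* example: 99.25% in parts per million */

            /* same split as ioc_qos_prfill(): whole percent, then hundredths */
            printf("%u.%02u%%\n", ppm / 10000, ppm % 10000 / 100);   /* 99.25% */
            return 0;
    }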
3158 struct ioc *ioc; in ioc_qos_write() local
3168 ioc = q_to_ioc(bdev->bd_disk->queue); in ioc_qos_write()
3169 if (!ioc) { in ioc_qos_write()
3173 ioc = q_to_ioc(bdev->bd_disk->queue); in ioc_qos_write()
3176 spin_lock_irq(&ioc->lock); in ioc_qos_write()
3177 memcpy(qos, ioc->params.qos, sizeof(qos)); in ioc_qos_write()
3178 enable = ioc->enabled; in ioc_qos_write()
3179 user = ioc->user_qos_params; in ioc_qos_write()
3180 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3247 spin_lock_irq(&ioc->lock); in ioc_qos_write()
3250 blk_stat_enable_accounting(ioc->rqos.q); in ioc_qos_write()
3251 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q); in ioc_qos_write()
3252 ioc->enabled = true; in ioc_qos_write()
3254 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q); in ioc_qos_write()
3255 ioc->enabled = false; in ioc_qos_write()
3259 memcpy(ioc->params.qos, qos, sizeof(qos)); in ioc_qos_write()
3260 ioc->user_qos_params = true; in ioc_qos_write()
3262 ioc->user_qos_params = false; in ioc_qos_write()
3265 ioc_refresh_params(ioc, true); in ioc_qos_write()
3266 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3281 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_cost_model_prfill() local
3282 u64 *u = ioc->params.i_lcoefs; in ioc_cost_model_prfill()
3290 dname, ioc->user_cost_model ? "user" : "auto", in ioc_cost_model_prfill()
3325 struct ioc *ioc; in ioc_cost_model_write() local
3335 ioc = q_to_ioc(bdev->bd_disk->queue); in ioc_cost_model_write()
3336 if (!ioc) { in ioc_cost_model_write()
3340 ioc = q_to_ioc(bdev->bd_disk->queue); in ioc_cost_model_write()
3343 spin_lock_irq(&ioc->lock); in ioc_cost_model_write()
3344 memcpy(u, ioc->params.i_lcoefs, sizeof(u)); in ioc_cost_model_write()
3345 user = ioc->user_cost_model; in ioc_cost_model_write()
3346 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()
3383 spin_lock_irq(&ioc->lock); in ioc_cost_model_write()
3385 memcpy(ioc->params.i_lcoefs, u, sizeof(u)); in ioc_cost_model_write()
3386 ioc->user_cost_model = true; in ioc_cost_model_write()
3388 ioc->user_cost_model = false; in ioc_cost_model_write()
3390 ioc_refresh_params(ioc, true); in ioc_cost_model_write()
3391 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()