Lines matching refs: ioc
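This is a cross-reference listing of ioc usage in the Linux kernel's block/blk-iocost.c (the blk-iocost I/O cost controller). Each entry shows the source line number, the matching line, and the containing definition as reported by the indexer; continuation lines that don't contain "ioc" are not shown. The short C sketches interspersed below illustrate the patterns the listing exposes. They are simplified userspace analogues written against the visible lines, not the kernel code itself; any constant or helper name not present in the listing is an assumption.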

402 struct ioc {  struct
460 struct ioc *ioc; argument
657 static struct ioc *rqos_to_ioc(struct rq_qos *rqos) in rqos_to_ioc()
659 return container_of(rqos, struct ioc, rqos); in rqos_to_ioc()
662 static struct ioc *q_to_ioc(struct request_queue *q) in q_to_ioc()
667 static const char __maybe_unused *ioc_name(struct ioc *ioc) in ioc_name() argument
669 struct gendisk *disk = ioc->rqos.q->disk; in ioc_name()
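rqos_to_ioc() at line 657 recovers the enclosing struct ioc from a pointer to its embedded rq_qos member. A minimal, compilable userspace analogue of that pointer arithmetic, with stand-in types (the real structs are far larger):

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel types involved. */
struct rq_qos { int dummy; };
struct ioc { long period_us; struct rq_qos rqos; };

/* Same idea as the kernel's container_of(): subtract the member's
 * offset to get back to the start of the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
{
	return container_of(rqos, struct ioc, rqos);
}

int main(void)
{
	struct ioc ioc = { .period_us = 3000 };
	struct rq_qos *rqos = &ioc.rqos;	/* what the rq-qos core hands out */

	printf("%ld\n", rqos_to_ioc(rqos)->period_us);	/* prints 3000 */
	return 0;
}
```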
730 spin_lock_irqsave(&iocg->ioc->lock, *flags); in iocg_lock()
741 spin_unlock_irqrestore(&iocg->ioc->lock, *flags); in iocg_unlock()
750 static void ioc_refresh_margins(struct ioc *ioc) in ioc_refresh_margins() argument
752 struct ioc_margins *margins = &ioc->margins; in ioc_refresh_margins()
753 u32 period_us = ioc->period_us; in ioc_refresh_margins()
754 u64 vrate = ioc->vtime_base_rate; in ioc_refresh_margins()
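ioc_refresh_margins() turns fixed fractions of the period into vtime units by scaling with the base vrate, so the margins automatically track both period and rate changes. A sketch of that arithmetic; the *_PCT constants here are illustrative stand-ins, not necessarily the kernel's values:

```c
#include <stdint.h>

/* Illustrative percentages of the period. */
enum { MARGIN_MIN_PCT = 10, MARGIN_LOW_PCT = 20, MARGIN_TARGET_PCT = 50 };

struct margins { int64_t min, low, target; };

static void refresh_margins(struct margins *m, uint32_t period_us,
			    uint64_t vrate)
{
	/* each margin: a fraction of the period, expressed in vtime */
	m->min    = (period_us * MARGIN_MIN_PCT / 100) * vrate;
	m->low    = (period_us * MARGIN_LOW_PCT / 100) * vrate;
	m->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
}
```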
762 static void ioc_refresh_period_us(struct ioc *ioc) in ioc_refresh_period_us() argument
766 lockdep_assert_held(&ioc->lock); in ioc_refresh_period_us()
769 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) { in ioc_refresh_period_us()
770 ppm = ioc->params.qos[QOS_RPPM]; in ioc_refresh_period_us()
771 lat = ioc->params.qos[QOS_RLAT]; in ioc_refresh_period_us()
773 ppm = ioc->params.qos[QOS_WPPM]; in ioc_refresh_period_us()
774 lat = ioc->params.qos[QOS_WLAT]; in ioc_refresh_period_us()
793 ioc->period_us = period_us; in ioc_refresh_period_us()
794 ioc->timer_slack_ns = div64_u64( in ioc_refresh_period_us()
797 ioc_refresh_margins(ioc); in ioc_refresh_period_us()
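ioc_refresh_period_us() picks whichever of the read/write latency targets is larger (lines 769-774) and derives the control period from it, then refreshes the margins since they depend on the period. A hedged sketch of the derivation; the multiplier and clamp bounds are assumptions, not the kernel's exact constants:

```c
#include <stdint.h>

#define MIN_PERIOD_US	1000u		/* assumed lower clamp */
#define MAX_PERIOD_US	1000000u	/* assumed upper clamp */

static uint32_t period_from_lat(uint32_t lat_us)
{
	uint32_t period_us;

	if (!lat_us)
		return MAX_PERIOD_US;	/* no latency target: longest period */

	/* period is a small multiple of the latency target (ratio assumed) */
	period_us = lat_us * 4;
	if (period_us < MIN_PERIOD_US)
		period_us = MIN_PERIOD_US;
	if (period_us > MAX_PERIOD_US)
		period_us = MAX_PERIOD_US;
	return period_us;
}
```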
800 static int ioc_autop_idx(struct ioc *ioc) in ioc_autop_idx() argument
802 int idx = ioc->autop_idx; in ioc_autop_idx()
808 if (!blk_queue_nonrot(ioc->rqos.q)) in ioc_autop_idx()
812 if (blk_queue_depth(ioc->rqos.q) == 1) in ioc_autop_idx()
820 if (ioc->user_qos_params || ioc->user_cost_model) in ioc_autop_idx()
824 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC); in ioc_autop_idx()
828 if (!ioc->autop_too_fast_at) in ioc_autop_idx()
829 ioc->autop_too_fast_at = now_ns; in ioc_autop_idx()
830 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
833 ioc->autop_too_fast_at = 0; in ioc_autop_idx()
837 if (!ioc->autop_too_slow_at) in ioc_autop_idx()
838 ioc->autop_too_slow_at = now_ns; in ioc_autop_idx()
839 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
842 ioc->autop_too_slow_at = 0; in ioc_autop_idx()
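ioc_autop_idx() only moves between auto-parameter classes after the vrate has been pegged out of range continuously for AUTOP_CYCLE_NSEC (lines 828-842), which keeps the classification from flapping. A compact sketch of that hysteresis; the thresholds and cycle length are illustrative, and the caller resets the timestamps when it actually switches (see lines 913-914):

```c
#include <stdint.h>

#define AUTOP_CYCLE_NSEC (10ULL * 1000 * 1000 * 1000)	/* assumed 10s */

struct autop_state {
	int idx;
	uint64_t too_fast_at;	/* 0 == not currently pegged high */
	uint64_t too_slow_at;	/* 0 == not currently pegged low */
};

static int autop_step(struct autop_state *s, uint64_t vrate_pct,
		      uint64_t now_ns)
{
	if (vrate_pct >= 400) {		/* pegged at max: device looks faster */
		if (!s->too_fast_at)
			s->too_fast_at = now_ns;
		if (now_ns - s->too_fast_at >= AUTOP_CYCLE_NSEC)
			return s->idx + 1;	/* promote to a faster class */
	} else {
		s->too_fast_at = 0;		/* back in range: reset */
	}

	if (vrate_pct <= 50) {		/* pegged at min: device looks slower */
		if (!s->too_slow_at)
			s->too_slow_at = now_ns;
		if (now_ns - s->too_slow_at >= AUTOP_CYCLE_NSEC)
			return s->idx - 1;	/* demote to a slower class */
	} else {
		s->too_slow_at = 0;
	}
	return s->idx;
}
```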
885 static void ioc_refresh_lcoefs(struct ioc *ioc) in ioc_refresh_lcoefs() argument
887 u64 *u = ioc->params.i_lcoefs; in ioc_refresh_lcoefs()
888 u64 *c = ioc->params.lcoefs; in ioc_refresh_lcoefs()
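ioc_refresh_lcoefs() translates the user-facing cost-model inputs in params.i_lcoefs (sustainable bytes/sec and IOs/sec) into the per-IO and per-page vtime coefficients stored in params.lcoefs. A hedged sketch of the unit conversion only; the VTIME_PER_SEC value and helper names are assumptions:

```c
#include <stdint.h>

#define VTIME_PER_SEC	(1ULL << 37)	/* assumed: vtime earned per second at 100% */
#define PAGE_SZ		4096ULL

/* vtime cost of moving one page, given sustainable bytes/sec */
static uint64_t lcoef_page(uint64_t bps)
{
	uint64_t pages_per_sec = bps / PAGE_SZ;

	return pages_per_sec ? VTIME_PER_SEC / pages_per_sec : 0;
}

/* vtime cost of issuing one IO, given sustainable IOs/sec */
static uint64_t lcoef_io(uint64_t iops)
{
	return iops ? VTIME_PER_SEC / iops : 0;
}
```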
896 static bool ioc_refresh_params(struct ioc *ioc, bool force) in ioc_refresh_params() argument
901 lockdep_assert_held(&ioc->lock); in ioc_refresh_params()
903 idx = ioc_autop_idx(ioc); in ioc_refresh_params()
906 if (idx == ioc->autop_idx && !force) in ioc_refresh_params()
909 if (idx != ioc->autop_idx) in ioc_refresh_params()
910 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in ioc_refresh_params()
912 ioc->autop_idx = idx; in ioc_refresh_params()
913 ioc->autop_too_fast_at = 0; in ioc_refresh_params()
914 ioc->autop_too_slow_at = 0; in ioc_refresh_params()
916 if (!ioc->user_qos_params) in ioc_refresh_params()
917 memcpy(ioc->params.qos, p->qos, sizeof(p->qos)); in ioc_refresh_params()
918 if (!ioc->user_cost_model) in ioc_refresh_params()
919 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs)); in ioc_refresh_params()
921 ioc_refresh_period_us(ioc); in ioc_refresh_params()
922 ioc_refresh_lcoefs(ioc); in ioc_refresh_params()
924 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] * in ioc_refresh_params()
926 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] * in ioc_refresh_params()
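Lines 924-926: qos[QOS_MIN] and qos[QOS_MAX] are stored in millionths (so MILLION equals 100%), and multiplying by VTIME_PER_USEC then dividing by MILLION yields that fraction of the nominal rate, rounding up for the minimum. A worked example; the VTIME_PER_USEC value is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define MILLION		1000000ULL
#define VTIME_PER_USEC	137438ULL	/* illustrative; derived from VTIME_PER_SEC in the kernel */

int main(void)
{
	uint64_t qos_min = 500000;	/* 50.00% as stored */
	/* round up, as DIV64_U64_ROUND_UP does on line 924 */
	uint64_t vrate_min = (qos_min * VTIME_PER_USEC + MILLION - 1) / MILLION;

	printf("vrate_min = %llu (half of nominal %llu)\n",
	       (unsigned long long)vrate_min,
	       (unsigned long long)VTIME_PER_USEC);
	return 0;
}
```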
939 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now) in ioc_refresh_vrate() argument
941 s64 pleft = ioc->period_at + ioc->period_us - now->now; in ioc_refresh_vrate()
942 s64 vperiod = ioc->period_us * ioc->vtime_base_rate; in ioc_refresh_vrate()
945 lockdep_assert_held(&ioc->lock); in ioc_refresh_vrate()
956 vcomp = -div64_s64(ioc->vtime_err, pleft); in ioc_refresh_vrate()
957 vcomp_min = -(ioc->vtime_base_rate >> 1); in ioc_refresh_vrate()
958 vcomp_max = ioc->vtime_base_rate; in ioc_refresh_vrate()
961 ioc->vtime_err += vcomp * pleft; in ioc_refresh_vrate()
963 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp); in ioc_refresh_vrate()
966 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod); in ioc_refresh_vrate()
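ioc_refresh_vrate() spreads the accumulated vtime_err over the time left in the period: the compensation is clamped to [-base/2, +base] (lines 956-958), the amount actually applied is charged back so the residual error carries into the next period (line 961), and the total error is capped at one period's worth of vtime (line 966). A sketch of that feedback step, simplified from the visible lines:

```c
#include <stdint.h>

static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* returns the effective vrate for the rest of the period */
static uint64_t refresh_vrate(int64_t *vtime_err, uint64_t base_rate,
			      int64_t pleft_us, int64_t period_us)
{
	int64_t vperiod = period_us * (int64_t)base_rate;
	int64_t vcomp;

	if (pleft_us <= 0)		/* period already elapsed */
		return base_rate;

	/* compensate the error over the remaining time, bounded */
	vcomp = clamp64(-(*vtime_err / pleft_us),
			-(int64_t)(base_rate >> 1), (int64_t)base_rate);
	*vtime_err += vcomp * pleft_us;	/* charge what will be applied */

	/* never let the error exceed one period's worth of vtime */
	*vtime_err = clamp64(*vtime_err, -vperiod, vperiod);

	return base_rate + vcomp;
}
```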
969 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct, in ioc_adjust_base_vrate() argument
973 u64 vrate = ioc->vtime_base_rate; in ioc_adjust_base_vrate()
974 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max; in ioc_adjust_base_vrate()
976 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) { in ioc_adjust_base_vrate()
977 if (ioc->busy_level != prev_busy_level || nr_lagging) in ioc_adjust_base_vrate()
978 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), in ioc_adjust_base_vrate()
997 int idx = min_t(int, abs(ioc->busy_level), in ioc_adjust_base_vrate()
1001 if (ioc->busy_level > 0) in ioc_adjust_base_vrate()
1010 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, in ioc_adjust_base_vrate()
1013 ioc->vtime_base_rate = vrate; in ioc_adjust_base_vrate()
1014 ioc_refresh_margins(ioc); in ioc_adjust_base_vrate()
1018 static void ioc_now(struct ioc *ioc, struct ioc_now *now) in ioc_now() argument
1024 now->vrate = atomic64_read(&ioc->vtime_rate); in ioc_now()
1035 seq = read_seqcount_begin(&ioc->period_seqcount); in ioc_now()
1036 now->vnow = ioc->period_at_vtime + in ioc_now()
1037 (now->now - ioc->period_at) * now->vrate; in ioc_now()
1038 } while (read_seqcount_retry(&ioc->period_seqcount, seq)); in ioc_now()
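ioc_now() computes vnow = period_at_vtime + (now - period_at) * vrate under a seqcount retry loop (lines 1035-1038), so readers never take ioc->lock. A minimal userspace analogue of the read side using C11 atomics; simplified in that the kernel's seqcount also supplies the exact memory barriers:

```c
#include <stdatomic.h>
#include <stdint.h>

struct period_snap {
	atomic_uint seq;		/* even: stable, odd: writer active */
	uint64_t period_at;		/* wallclock us at period start */
	uint64_t period_at_vtime;	/* vtime at period start */
};

static void snap_read(struct period_snap *s, uint64_t *at, uint64_t *vat)
{
	unsigned seq;

	do {
		while ((seq = atomic_load(&s->seq)) & 1)
			;		/* writer in progress: wait */
		*at  = s->period_at;
		*vat = s->period_at_vtime;
	} while (atomic_load(&s->seq) != seq);	/* retry if it moved */
}
```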
1041 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) in ioc_start_period() argument
1043 WARN_ON_ONCE(ioc->running != IOC_RUNNING); in ioc_start_period()
1045 write_seqcount_begin(&ioc->period_seqcount); in ioc_start_period()
1046 ioc->period_at = now->now; in ioc_start_period()
1047 ioc->period_at_vtime = now->vnow; in ioc_start_period()
1048 write_seqcount_end(&ioc->period_seqcount); in ioc_start_period()
1050 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us); in ioc_start_period()
1051 add_timer(&ioc->timer); in ioc_start_period()
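ioc_start_period() is the write side of the same snapshot: bump the sequence to odd, store both fields, bump back to even (lines 1045-1048), then arm the period timer. Continuing the sketch above:

```c
static void snap_write(struct period_snap *s, uint64_t at, uint64_t vat)
{
	atomic_fetch_add(&s->seq, 1);	/* odd: readers will spin/retry */
	s->period_at = at;
	s->period_at_vtime = vat;
	atomic_fetch_add(&s->seq, 1);	/* even: snapshot stable again */
}
```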
1062 struct ioc *ioc = iocg->ioc; in __propagate_weights() local
1065 lockdep_assert_held(&ioc->lock); in __propagate_weights()
1119 ioc->weights_updated = true; in __propagate_weights()
1122 static void commit_weights(struct ioc *ioc) in commit_weights() argument
1124 lockdep_assert_held(&ioc->lock); in commit_weights()
1126 if (ioc->weights_updated) { in commit_weights()
1129 atomic_inc(&ioc->hweight_gen); in commit_weights()
1130 ioc->weights_updated = false; in commit_weights()
1138 commit_weights(iocg->ioc); in propagate_weights()
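commit_weights() (line 1122) doesn't recompute anything: it bumps hweight_gen, and current_hweight() lazily recomputes a cgroup's hierarchical weight when its cached generation is stale (line 1149; line 1294 deliberately seeds a stale generation on activation to force the first recompute). A sketch of that generation-based cache invalidation; the recompute itself is elided:

```c
#include <stdatomic.h>
#include <stdint.h>

struct ctrl { atomic_int hweight_gen; };
struct grp  { int hweight_gen; uint32_t hwi; /* cached inuse fraction */ };

static uint32_t recompute_hwi(struct grp *g)
{
	(void)g;
	return 0;	/* placeholder: walk ancestors, multiply fractions */
}

static void commit_weights(struct ctrl *c)
{
	atomic_fetch_add(&c->hweight_gen, 1);	/* invalidate all caches */
}

static uint32_t current_hweight(struct ctrl *c, struct grp *g)
{
	int gen = atomic_load(&c->hweight_gen);

	if (g->hweight_gen != gen) {		/* cache stale: recompute */
		g->hwi = recompute_hwi(g);
		g->hweight_gen = gen;
	}
	return g->hwi;
}
```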
1143 struct ioc *ioc = iocg->ioc; in current_hweight() local
1149 ioc_gen = atomic_read(&ioc->hweight_gen); in current_hweight()
1206 lockdep_assert_held(&iocg->ioc->lock); in current_hweight_max()
1223 struct ioc *ioc = iocg->ioc; in weight_updated() local
1228 lockdep_assert_held(&ioc->lock); in weight_updated()
1238 struct ioc *ioc = iocg->ioc; in iocg_activate() local
1248 ioc_now(ioc, now); in iocg_activate()
1249 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1259 spin_lock_irq(&ioc->lock); in iocg_activate()
1261 ioc_now(ioc, now); in iocg_activate()
1264 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1282 vtarget = now->vnow - ioc->margins.target; in iocg_activate()
1294 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; in iocg_activate()
1295 list_add(&iocg->active_list, &ioc->active_iocgs); in iocg_activate()
1305 if (ioc->running == IOC_IDLE) { in iocg_activate()
1306 ioc->running = IOC_RUNNING; in iocg_activate()
1307 ioc->dfgv_period_at = now->now; in iocg_activate()
1308 ioc->dfgv_period_rem = 0; in iocg_activate()
1309 ioc_start_period(ioc, now); in iocg_activate()
1313 spin_unlock_irq(&ioc->lock); in iocg_activate()
1317 spin_unlock_irq(&ioc->lock); in iocg_activate()
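iocg_activate() runs on the issue path, so it first checks locklessly whether the cgroup already activated during the current period (lines 1248-1249) and only takes ioc->lock, re-reading the period under it (line 1264), when actual (re)activation is needed. A userspace sketch of that shape, with a mutex standing in for the spinlock and the list/timer work elided:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct ctl { atomic_uint_least64_t cur_period; pthread_mutex_t lock; };
struct grp { atomic_uint_least64_t active_period; };

static bool grp_activate(struct ctl *c, struct grp *g)
{
	uint64_t cur = atomic_load(&c->cur_period);

	/* fast path: already activated during this period */
	if (atomic_load(&g->active_period) == cur)
		return true;

	pthread_mutex_lock(&c->lock);
	cur = atomic_load(&c->cur_period);	/* may have advanced */
	atomic_store(&g->active_period, cur);
	/* ... link onto the active list, kick the period timer if idle ... */
	pthread_mutex_unlock(&c->lock);
	return true;
}
```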
1323 struct ioc *ioc = iocg->ioc; in iocg_kick_delay() local
1343 ioc->period_us * ioc->vtime_base_rate); in iocg_kick_delay()
1383 lockdep_assert_held(&iocg->ioc->lock); in iocg_incur_debt()
1406 lockdep_assert_held(&iocg->ioc->lock); in iocg_pay_debt()
1460 struct ioc *ioc = iocg->ioc; in iocg_kick_waitq() local
1477 lockdep_assert_held(&ioc->lock); in iocg_kick_waitq()
1526 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) * in iocg_kick_waitq()
1528 expires += ioc->timer_slack_ns; in iocg_kick_waitq()
1533 abs(oexpires - expires) <= ioc->timer_slack_ns) in iocg_kick_waitq()
1537 ioc->timer_slack_ns, HRTIMER_MODE_ABS); in iocg_kick_waitq()
1547 ioc_now(iocg->ioc, &now); in iocg_waitq_timer_fn()
1556 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p) in ioc_lat_stat() argument
1564 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu); in ioc_lat_stat()
1592 ioc->period_us * NSEC_PER_USEC); in ioc_lat_stat()
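ioc_lat_stat() sums the per-CPU counters and converts them into the two control inputs used by the timer: the fraction of IOs that missed their latency target, in parts-per-million, and total request-wait time as a percentage of the period (line 1592). A hedged reconstruction of the arithmetic:

```c
#include <stdint.h>

#define MILLION		1000000ULL
#define NSEC_PER_USEC	1000ULL

/* fraction of IOs that missed their latency target, in ppm */
static uint32_t missed_ppm(uint64_t nr_missed, uint64_t nr_total)
{
	return nr_total ? (uint32_t)(nr_missed * MILLION / nr_total) : 0;
}

/* total request-wait time as a percentage of one period */
static uint32_t rq_wait_pct(uint64_t rq_wait_ns, uint64_t period_us)
{
	return (uint32_t)(rq_wait_ns * 100 / (period_us * NSEC_PER_USEC));
}
```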
1598 struct ioc *ioc = iocg->ioc; in iocg_is_idle() local
1602 atomic64_read(&ioc->cur_period)) in iocg_is_idle()
1662 struct ioc *ioc = iocg->ioc; in iocg_flush_stat_leaf() local
1667 lockdep_assert_held(&iocg->ioc->lock); in iocg_flush_stat_leaf()
1677 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); in iocg_flush_stat_leaf()
1710 struct ioc *ioc = iocg->ioc; in hweight_after_donation() local
1720 time_after64(vtime, now->vnow - ioc->margins.min)) in hweight_after_donation()
1724 excess = now->vnow - vtime - ioc->margins.target; in hweight_after_donation()
1729 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE); in hweight_after_donation()
1749 now->vnow - ioc->period_at_vtime); in hweight_after_donation()
2039 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, in ioc_forgive_debts() argument
2047 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2048 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2049 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2059 if (ioc->busy_level > 0) in ioc_forgive_debts()
2060 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us); in ioc_forgive_debts()
2062 ioc->dfgv_usage_us_sum += usage_us_sum; in ioc_forgive_debts()
2063 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD)) in ioc_forgive_debts()
2070 dur = now->now - ioc->dfgv_period_at; in ioc_forgive_debts()
2071 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur); in ioc_forgive_debts()
2073 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2074 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2078 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2091 nr_cycles = dur + ioc->dfgv_period_rem; in ioc_forgive_debts()
2092 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD); in ioc_forgive_debts()
2094 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_forgive_debts()
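Lines 2091-2092 are a remainder-carrying division: the elapsed time plus the leftover from the previous run is divided into whole DFGV_PERIODs, and the new remainder is stashed for next time, so no forgiveness cycles are lost to rounding. The same pattern in plain C; the period length is an assumption:

```c
#include <stdint.h>

#define DFGV_PERIOD_US	(100ULL * 1000)	/* assumed window length */

/* returns whole forgiveness cycles elapsed; *rem carries the leftover */
static uint64_t forgive_cycles(uint64_t dur_us, uint64_t *rem)
{
	uint64_t total = dur_us + *rem;

	*rem = total % DFGV_PERIOD_US;	/* what do_div() leaves behind */
	return total / DFGV_PERIOD_US;
}
```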
2130 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now) in ioc_check_iocgs() argument
2135 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { in ioc_check_iocgs()
2175 excess = now->vnow - vtime - ioc->margins.target; in ioc_check_iocgs()
2180 ioc->vtime_err -= div64_u64(excess * old_hwi, in ioc_check_iocgs()
2186 atomic64_read(&ioc->cur_period), vtime); in ioc_check_iocgs()
2194 commit_weights(ioc); in ioc_check_iocgs()
2200 struct ioc *ioc = container_of(timer, struct ioc, timer); in ioc_timer_fn() local
2206 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; in ioc_timer_fn()
2207 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; in ioc_timer_fn()
2213 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct); in ioc_timer_fn()
2216 spin_lock_irq(&ioc->lock); in ioc_timer_fn()
2218 ioc_now(ioc, &now); in ioc_timer_fn()
2220 period_vtime = now.vnow - ioc->period_at_vtime; in ioc_timer_fn()
2222 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
2226 nr_debtors = ioc_check_iocgs(ioc, &now); in ioc_timer_fn()
2232 iocg_flush_stat(&ioc->active_iocgs, &now); in ioc_timer_fn()
2235 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_timer_fn()
2272 time_before64(vtime, now.vnow - ioc->margins.low))) { in ioc_timer_fn()
2279 ioc->vtime_base_rate); in ioc_timer_fn()
2285 if (time_after64(iocg->activated_at, ioc->period_at)) in ioc_timer_fn()
2288 usage_dur = max_t(u64, now.now - ioc->period_at, 1); in ioc_timer_fn()
2342 commit_weights(ioc); in ioc_timer_fn()
2354 prev_busy_level = ioc->busy_level; in ioc_timer_fn()
2359 ioc->busy_level = max(ioc->busy_level, 0); in ioc_timer_fn()
2360 ioc->busy_level++; in ioc_timer_fn()
2370 ioc->busy_level = min(ioc->busy_level, 0); in ioc_timer_fn()
2377 ioc->busy_level--; in ioc_timer_fn()
2385 ioc->busy_level = 0; in ioc_timer_fn()
2389 ioc->busy_level = 0; in ioc_timer_fn()
2392 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000); in ioc_timer_fn()
2394 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages, in ioc_timer_fn()
2397 ioc_refresh_params(ioc, false); in ioc_timer_fn()
2399 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now); in ioc_timer_fn()
2405 atomic64_inc(&ioc->cur_period); in ioc_timer_fn()
2407 if (ioc->running != IOC_STOP) { in ioc_timer_fn()
2408 if (!list_empty(&ioc->active_iocgs)) { in ioc_timer_fn()
2409 ioc_start_period(ioc, &now); in ioc_timer_fn()
2411 ioc->busy_level = 0; in ioc_timer_fn()
2412 ioc->vtime_err = 0; in ioc_timer_fn()
2413 ioc->running = IOC_IDLE; in ioc_timer_fn()
2416 ioc_refresh_vrate(ioc, &now); in ioc_timer_fn()
2419 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
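The tail of ioc_timer_fn() (lines 2354-2392) maintains busy_level as a signed saturation counter: periods that miss the QoS targets push it positive (slow the vrate down), periods where cgroups run short of budget while QoS has headroom push it negative (speed up), sign changes reset it toward zero first, and it is clamped to ±1000 before driving ioc_adjust_base_vrate(). A sketch of the state update, with the trigger conditions simplified from the listing:

```c
#include <stdbool.h>

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* positive: missing QoS targets; negative: shortages with QoS headroom */
static int update_busy_level(int busy_level, bool missing_qos,
			     bool have_headroom, int nr_shortages)
{
	if (missing_qos) {
		busy_level = busy_level < 0 ? 0 : busy_level;	/* reset sign */
		busy_level++;
	} else if (have_headroom && nr_shortages) {
		busy_level = busy_level > 0 ? 0 : busy_level;
		busy_level--;
	} else {
		busy_level = 0;		/* neither clearly busy nor idle */
	}
	return clampi(busy_level, -1000, 1000);
}
```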
2425 struct ioc *ioc = iocg->ioc; in adjust_inuse_and_calc_cost() local
2426 struct ioc_margins *margins = &ioc->margins; in adjust_inuse_and_calc_cost()
2449 spin_lock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2453 spin_unlock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2474 spin_unlock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2485 struct ioc *ioc = iocg->ioc; in calc_vtime_cost_builtin() local
2493 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO]; in calc_vtime_cost_builtin()
2494 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO]; in calc_vtime_cost_builtin()
2495 coef_page = ioc->params.lcoefs[LCOEF_RPAGE]; in calc_vtime_cost_builtin()
2498 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO]; in calc_vtime_cost_builtin()
2499 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO]; in calc_vtime_cost_builtin()
2500 coef_page = ioc->params.lcoefs[LCOEF_WPAGE]; in calc_vtime_cost_builtin()
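calc_vtime_cost_builtin() selects the read or write coefficient set (lines 2493-2500) and then prices a bio as a per-IO term, sequential or random depending on how far it seeks from the previous bio, plus a per-page term scaling with the transfer size. A hedged sketch; the seek-distance cutoff is an assumption:

```c
#include <stdint.h>

#define RANDIO_PAGES	16ULL	/* assumed seq/rand cutoff, in pages */

struct lcoefs { uint64_t seqio, randio, page; };

static uint64_t bio_cost(const struct lcoefs *c, uint64_t seek_pages,
			 uint64_t pages)
{
	uint64_t cost;

	/* per-IO component: random if it seeks far from the last bio */
	cost = seek_pages > RANDIO_PAGES ? c->randio : c->seqio;

	/* per-page component scales with the transfer size */
	cost += pages * c->page;
	return cost;
}
```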
2531 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc, in calc_size_vtime_cost_builtin() argument
2538 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE]; in calc_size_vtime_cost_builtin()
2541 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE]; in calc_size_vtime_cost_builtin()
2548 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc) in calc_size_vtime_cost() argument
2552 calc_size_vtime_cost_builtin(rq, ioc, &cost); in calc_size_vtime_cost()
2559 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_throttle() local
2568 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_throttle()
2695 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_merge() local
2702 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_merge()
2709 ioc_now(ioc, &now); in ioc_rqos_merge()
2734 spin_lock_irqsave(&ioc->lock, flags); in ioc_rqos_merge()
2747 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_rqos_merge()
2760 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_done() local
2765 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns) in ioc_rqos_done()
2783 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC); in ioc_rqos_done()
2785 ccs = get_cpu_ptr(ioc->pcpu_stat); in ioc_rqos_done()
2788 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC) in ioc_rqos_done()
2800 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_queue_depth_changed() local
2802 spin_lock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2803 ioc_refresh_params(ioc, false); in ioc_rqos_queue_depth_changed()
2804 spin_unlock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2809 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_exit() local
2813 spin_lock_irq(&ioc->lock); in ioc_rqos_exit()
2814 ioc->running = IOC_STOP; in ioc_rqos_exit()
2815 spin_unlock_irq(&ioc->lock); in ioc_rqos_exit()
2817 del_timer_sync(&ioc->timer); in ioc_rqos_exit()
2818 free_percpu(ioc->pcpu_stat); in ioc_rqos_exit()
2819 kfree(ioc); in ioc_rqos_exit()
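Worth noting in ioc_rqos_exit(): running is set to IOC_STOP under the lock (line 2814) before del_timer_sync() (line 2817), so a concurrently executing ioc_timer_fn() observes the stop flag (line 2407) and does not rearm itself; del_timer_sync() then waits out any in-flight timer, and only after that are the percpu stats and the ioc itself freed. Freeing in any other order would race with the timer.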
2834 struct ioc *ioc; in blk_iocost_init() local
2838 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); in blk_iocost_init()
2839 if (!ioc) in blk_iocost_init()
2842 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat); in blk_iocost_init()
2843 if (!ioc->pcpu_stat) { in blk_iocost_init()
2844 kfree(ioc); in blk_iocost_init()
2849 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); in blk_iocost_init()
2858 rqos = &ioc->rqos; in blk_iocost_init()
2863 spin_lock_init(&ioc->lock); in blk_iocost_init()
2864 timer_setup(&ioc->timer, ioc_timer_fn, 0); in blk_iocost_init()
2865 INIT_LIST_HEAD(&ioc->active_iocgs); in blk_iocost_init()
2867 ioc->running = IOC_IDLE; in blk_iocost_init()
2868 ioc->vtime_base_rate = VTIME_PER_USEC; in blk_iocost_init()
2869 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in blk_iocost_init()
2870 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock); in blk_iocost_init()
2871 ioc->period_at = ktime_to_us(ktime_get()); in blk_iocost_init()
2872 atomic64_set(&ioc->cur_period, 0); in blk_iocost_init()
2873 atomic_set(&ioc->hweight_gen, 0); in blk_iocost_init()
2875 spin_lock_irq(&ioc->lock); in blk_iocost_init()
2876 ioc->autop_idx = AUTOP_INVALID; in blk_iocost_init()
2877 ioc_refresh_params(ioc, true); in blk_iocost_init()
2878 spin_unlock_irq(&ioc->lock); in blk_iocost_init()
2898 free_percpu(ioc->pcpu_stat); in blk_iocost_init()
2899 kfree(ioc); in blk_iocost_init()
2943 struct ioc *ioc = q_to_ioc(blkg->q); in ioc_pd_init() local
2948 ioc_now(ioc, &now); in ioc_pd_init()
2950 iocg->ioc = ioc; in ioc_pd_init()
2953 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); in ioc_pd_init()
2971 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_init()
2973 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_init()
2979 struct ioc *ioc = iocg->ioc; in ioc_pd_free() local
2982 if (ioc) { in ioc_pd_free()
2983 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_free()
2988 ioc_now(ioc, &now); in ioc_pd_free()
2996 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_free()
3007 struct ioc *ioc = iocg->ioc; in ioc_pd_stat() local
3009 if (!ioc->enabled) in ioc_pd_stat()
3014 ioc->vtime_base_rate * 10000, in ioc_pd_stat()
3077 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3078 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3080 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3103 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3105 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3107 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3121 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_qos_prfill() local
3127 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto", in ioc_qos_prfill()
3128 ioc->params.qos[QOS_RPPM] / 10000, in ioc_qos_prfill()
3129 ioc->params.qos[QOS_RPPM] % 10000 / 100, in ioc_qos_prfill()
3130 ioc->params.qos[QOS_RLAT], in ioc_qos_prfill()
3131 ioc->params.qos[QOS_WPPM] / 10000, in ioc_qos_prfill()
3132 ioc->params.qos[QOS_WPPM] % 10000 / 100, in ioc_qos_prfill()
3133 ioc->params.qos[QOS_WLAT], in ioc_qos_prfill()
3134 ioc->params.qos[QOS_MIN] / 10000, in ioc_qos_prfill()
3135 ioc->params.qos[QOS_MIN] % 10000 / 100, in ioc_qos_prfill()
3136 ioc->params.qos[QOS_MAX] / 10000, in ioc_qos_prfill()
3137 ioc->params.qos[QOS_MAX] % 10000 / 100); in ioc_qos_prfill()
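Lines 3128-3137: the qos values are stored in millionths (10^6 = 100%), so dividing by 10000 yields the integer percent and the remainder over 100 yields the two decimal places. A runnable example of the same split ioc_qos_prfill() performs:

```c
#include <stdio.h>

int main(void)
{
	unsigned v = 953210;	/* stored value for 95.32% */

	/* integer percent, then two decimals, as in ioc_qos_prfill() */
	printf("%u.%02u%%\n", v / 10000, v % 10000 / 100);	/* 95.32% */
	return 0;
}
```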
3171 struct ioc *ioc; in ioc_qos_write() local
3182 ioc = q_to_ioc(disk->queue); in ioc_qos_write()
3183 if (!ioc) { in ioc_qos_write()
3187 ioc = q_to_ioc(disk->queue); in ioc_qos_write()
3190 spin_lock_irq(&ioc->lock); in ioc_qos_write()
3191 memcpy(qos, ioc->params.qos, sizeof(qos)); in ioc_qos_write()
3192 enable = ioc->enabled; in ioc_qos_write()
3193 user = ioc->user_qos_params; in ioc_qos_write()
3194 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3261 spin_lock_irq(&ioc->lock); in ioc_qos_write()
3266 ioc->enabled = true; in ioc_qos_write()
3269 ioc->enabled = false; in ioc_qos_write()
3273 memcpy(ioc->params.qos, qos, sizeof(qos)); in ioc_qos_write()
3274 ioc->user_qos_params = true; in ioc_qos_write()
3276 ioc->user_qos_params = false; in ioc_qos_write()
3279 ioc_refresh_params(ioc, true); in ioc_qos_write()
3280 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3295 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_cost_model_prfill() local
3296 u64 *u = ioc->params.i_lcoefs; in ioc_cost_model_prfill()
3304 dname, ioc->user_cost_model ? "user" : "auto", in ioc_cost_model_prfill()
3339 struct ioc *ioc; in ioc_cost_model_write() local
3349 ioc = q_to_ioc(bdev_get_queue(bdev)); in ioc_cost_model_write()
3350 if (!ioc) { in ioc_cost_model_write()
3354 ioc = q_to_ioc(bdev_get_queue(bdev)); in ioc_cost_model_write()
3357 spin_lock_irq(&ioc->lock); in ioc_cost_model_write()
3358 memcpy(u, ioc->params.i_lcoefs, sizeof(u)); in ioc_cost_model_write()
3359 user = ioc->user_cost_model; in ioc_cost_model_write()
3360 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()
3397 spin_lock_irq(&ioc->lock); in ioc_cost_model_write()
3399 memcpy(ioc->params.i_lcoefs, u, sizeof(u)); in ioc_cost_model_write()
3400 ioc->user_cost_model = true; in ioc_cost_model_write()
3402 ioc->user_cost_model = false; in ioc_cost_model_write()
3404 ioc_refresh_params(ioc, true); in ioc_cost_model_write()
3405 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()