Lines matching refs: cfqd — uses of struct cfq_data, the CFQ I/O scheduler's per-device state, throughout block/cfq-iosched.c
115 struct cfq_data *cfqd; member
402 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
659 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \ argument
660 blk_add_cgroup_trace_msg((cfqd)->queue, \
668 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \ argument
669 blk_add_cgroup_trace_msg((cfqd)->queue, \
786 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ argument
787 blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
791 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) argument
808 #define cfq_log(cfqd, fmt, args...) \ argument
809 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
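These three macros are the scheduler's tracing layer: cfq_log_cfqq() and cfq_log_cfqg() tag each message with the queue or group, and the cfqg variant compiles to a no-op when group scheduling is configured out. A minimal user-space sketch of the same pattern, with fprintf() standing in for blk_add_trace_msg() (an assumption purely for illustration; the real macro also prints sync/prio flag characters):

#include <stdio.h>

struct cfq_queue { int pid; };
struct cfq_data { struct cfq_queue *active; };

/* Variadic macro in the style of cfq_log_cfqq(): prefix every trace
 * line with the queue's pid, forward the caller's format and args.
 * fprintf() stands in for blk_add_trace_msg() here. */
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
	fprintf(stderr, "cfq%d " fmt "\n", (cfqq)->pid, ##args)

/* Compiled-out variant, as when CONFIG_CFQ_GROUP_IOSCHED is unset. */
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)

int main(void)
{
	struct cfq_queue q = { .pid = 42 };

	cfq_log_cfqq((struct cfq_data *)0, &q, "set_slice=%llu",
		     (unsigned long long)100000);
	return 0;
}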
821 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd, in cfq_io_thinktime_big() argument
828 slice = cfqd->cfq_group_idle; in cfq_io_thinktime_big()
830 slice = cfqd->cfq_slice_idle; in cfq_io_thinktime_big()
834 static inline bool iops_mode(struct cfq_data *cfqd) in iops_mode() argument
843 if (!cfqd->cfq_slice_idle && cfqd->hw_tag) in iops_mode()
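cfq_io_thinktime_big() asks whether a queue's mean think time exceeds the idle window CFQ would otherwise spend waiting on it, and iops_mode() holds when idling is disabled on a command-queuing device, switching group accounting from time to request counts. A standalone sketch of both predicates, with the structs trimmed to the fields the checks touch and sample_valid() reduced to a nonzero-samples test:

#include <stdbool.h>
#include <stdint.h>

struct cfq_ttime { uint64_t ttime_mean; unsigned ttime_samples; };
struct cfq_data  { uint64_t cfq_slice_idle, cfq_group_idle; int hw_tag; };

/* "Big" think time: waiting out the idle window would cost more than
 * the queue's observed gap between requests. */
static bool cfq_io_thinktime_big(struct cfq_data *cfqd,
				 struct cfq_ttime *ttime, bool group_idle)
{
	uint64_t slice = group_idle ? cfqd->cfq_group_idle
				    : cfqd->cfq_slice_idle;

	if (!ttime->ttime_samples)	/* no data yet: assume small */
		return false;
	return ttime->ttime_mean > slice;
}

/* Idling off + hardware tagging on: charge groups by IOPS, not time. */
static bool iops_mode(struct cfq_data *cfqd)
{
	return !cfqd->cfq_slice_idle && cfqd->hw_tag;
}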
869 struct cfq_data *cfqd, in cfq_group_busy_queues_wl() argument
880 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, in cfqg_busy_async_queues() argument
888 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
897 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd, in cfq_cic_lookup() argument
901 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue)); in cfq_cic_lookup()
925 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) in cfq_schedule_dispatch() argument
927 if (cfqd->busy_queues) { in cfq_schedule_dispatch()
928 cfq_log(cfqd, "schedule dispatch"); in cfq_schedule_dispatch()
929 kblockd_schedule_work(&cfqd->unplug_work); in cfq_schedule_dispatch()
938 static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync, in cfq_prio_slice() argument
941 u64 base_slice = cfqd->cfq_slice[sync]; in cfq_prio_slice()
950 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_to_slice() argument
952 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); in cfq_prio_to_slice()
1002 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd, in cfq_group_get_avg_queues() argument
1008 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); in cfq_group_get_avg_queues()
1018 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_slice() argument
1020 return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT; in cfq_group_slice()
1024 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_scaled_cfqq_slice() argument
1026 u64 slice = cfq_prio_to_slice(cfqd, cfqq); in cfq_scaled_cfqq_slice()
1027 if (cfqd->cfq_latency) { in cfq_scaled_cfqq_slice()
1032 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, in cfq_scaled_cfqq_slice()
1034 u64 sync_slice = cfqd->cfq_slice[1]; in cfq_scaled_cfqq_slice()
1036 u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg); in cfq_scaled_cfqq_slice()
1039 u64 base_low_slice = 2 * cfqd->cfq_slice_idle; in cfq_scaled_cfqq_slice()
1056 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_set_prio_slice() argument
1058 u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq); in cfq_set_prio_slice()
1064 cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now); in cfq_set_prio_slice()
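Together these turn an I/O priority into a wall-clock slice: cfq_prio_slice() offsets the base sync or async slice around the default priority, cfq_prio_to_slice() plugs in the queue's own sync flag and ioprio, and cfq_scaled_cfqq_slice() then shrinks the result when the group holds more busy queues than cfq_target_latency can cover. A user-space model of the priority step, using the kernel's CFQ_SLICE_SCALE pivot around priority 4:

#include <stdint.h>
#include <stdbool.h>

#define CFQ_SLICE_SCALE	5	/* slice moves by base/5 per priority step */

struct cfq_data { uint64_t cfq_slice[2]; };	/* [0] = async, [1] = sync */

/* Priority 4 (the default) gets exactly the base slice; each step
 * toward 0 adds base/CFQ_SLICE_SCALE, each step toward 7 removes it. */
static uint64_t cfq_prio_slice(struct cfq_data *cfqd, bool sync,
			       unsigned short prio)
{
	int64_t base_slice = (int64_t)cfqd->cfq_slice[sync];

	return (uint64_t)(base_slice +
			  base_slice / CFQ_SLICE_SCALE * (4 - (int)prio));
}

With the 100 ms sync default, priority 0 maps to 180 ms and priority 7 to 40 ms.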
1088 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last) in cfq_choose_req() argument
1113 back_max = cfqd->cfq_back_max * 2; in cfq_choose_req()
1123 d1 = (last - s1) * cfqd->cfq_back_penalty; in cfq_choose_req()
1130 d2 = (last - s2) * cfqd->cfq_back_penalty; in cfq_choose_req()
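cfq_choose_req() picks whichever request costs less to seek to from the last head position: forward distance counts at face value, backward seeks within twice cfq_back_max are multiplied by cfq_back_penalty, and anything further back "wraps" and loses to any in-range candidate. A self-contained model of that cost and choice, with bare sector positions standing in for struct request:

#include <stdint.h>

typedef uint64_t sector_t;

/* Penalized distance from the current head position to a request.
 * Returns UINT64_MAX for seeks further back than back_max ("wrapped"),
 * so they compare worse than any in-range candidate. */
static uint64_t cfq_seek_cost(sector_t pos, sector_t last,
			      sector_t back_max, unsigned back_penalty)
{
	if (pos >= last)			/* forward: raw distance */
		return pos - last;
	if (pos + back_max >= last)		/* short backward: penalized */
		return (last - pos) * back_penalty;
	return UINT64_MAX;			/* long backward: wrapped */
}

/* Pick the closer of two candidate positions, as cfq_choose_req() does
 * once both costs are known. */
static sector_t cfq_choose_pos(sector_t s1, sector_t s2, sector_t last,
			       sector_t back_max, unsigned back_penalty)
{
	uint64_t d1 = cfq_seek_cost(s1, last, back_max, back_penalty);
	uint64_t d2 = cfq_seek_cost(s2, last, back_max, back_penalty);

	return d1 <= d2 ? s1 : s2;
}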
1201 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_find_next_rq() argument
1221 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); in cfq_find_next_rq()
1224 static u64 cfq_slice_offset(struct cfq_data *cfqd, in cfq_slice_offset() argument
1230 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - in cfq_slice_offset()
1231 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); in cfq_slice_offset()
1343 static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd) in cfq_get_cfqg_vdisktime_delay() argument
1345 if (!iops_mode(cfqd)) in cfq_get_cfqg_vdisktime_delay()
1352 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_add() argument
1354 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_notify_queue_add()
1371 cfq_get_cfqg_vdisktime_delay(cfqd); in cfq_group_notify_queue_add()
1411 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg) in cfq_group_notify_queue_del() argument
1413 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_notify_queue_del()
1422 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); in cfq_group_notify_queue_del()
1461 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, in cfq_group_served() argument
1464 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_group_served()
1466 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) in cfq_group_served()
1474 if (iops_mode(cfqd)) in cfq_group_served()
1491 if (cfqd->workload_expires > now) { in cfq_group_served()
1492 cfqg->saved_wl_slice = cfqd->workload_expires - now; in cfq_group_served()
1493 cfqg->saved_wl_type = cfqd->serving_wl_type; in cfq_group_served()
1494 cfqg->saved_wl_class = cfqd->serving_wl_class; in cfq_group_served()
1498 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, in cfq_group_served()
1500 cfq_log_cfqq(cfqq->cfqd, cfqq, in cfq_group_served()
1503 iops_mode(cfqd), cfqq->nr_sectors); in cfq_group_served()
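cfq_group_served() charges the group for its service and advances vdisktime: in iops_mode() the charge is the number of dispatched requests, otherwise the allocated or used time slice, and the charge is scaled inversely to the group's weight fraction so heavier groups accrue virtual time more slowly and win the service tree again sooner. A simplified inverse-weight charge under that reading, reusing the fixed-point vfraction convention visible in cfq_group_slice() above (the exact kernel scaling helper differs in detail):

#include <stdint.h>

#define CFQ_SERVICE_SHIFT 12	/* vfraction fixed point: 1.0 == 1 << 12 */

/* Charge `used` (ns, or request count in iops mode) against a group
 * owning vfraction/2^CFQ_SERVICE_SHIFT of the tree's weight: a group
 * with half the weight accrues vdisktime at twice real time.
 * Caller guarantees vfraction > 0. */
static uint64_t scale_charge(uint64_t used, unsigned vfraction)
{
	return (used << CFQ_SERVICE_SHIFT) / vfraction;
}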
1680 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd, in cfq_lookup_cfqg() argument
1685 blkg = blkg_lookup(blkcg, cfqd->queue); in cfq_lookup_cfqg()
2184 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd, in cfq_lookup_cfqg() argument
2187 return cfqd->root_group; in cfq_lookup_cfqg()
2202 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_service_tree_add() argument
2229 rb_key = cfq_slice_offset(cfqd, cfqq) + now; in cfq_service_tree_add()
2274 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); in cfq_service_tree_add()
2278 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, in cfq_prio_tree_lookup() argument
2313 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_tree_add() argument
2328 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; in cfq_prio_tree_add()
2329 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, in cfq_prio_tree_add()
2341 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_resort_rr_list() argument
2347 cfq_service_tree_add(cfqd, cfqq, 0); in cfq_resort_rr_list()
2348 cfq_prio_tree_add(cfqd, cfqq); in cfq_resort_rr_list()
2356 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_add_cfqq_rr() argument
2358 cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); in cfq_add_cfqq_rr()
2361 cfqd->busy_queues++; in cfq_add_cfqq_rr()
2363 cfqd->busy_sync_queues++; in cfq_add_cfqq_rr()
2365 cfq_resort_rr_list(cfqd, cfqq); in cfq_add_cfqq_rr()
2372 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_del_cfqq_rr() argument
2374 cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); in cfq_del_cfqq_rr()
2387 cfq_group_notify_queue_del(cfqd, cfqq->cfqg); in cfq_del_cfqq_rr()
2388 BUG_ON(!cfqd->busy_queues); in cfq_del_cfqq_rr()
2389 cfqd->busy_queues--; in cfq_del_cfqq_rr()
2391 cfqd->busy_sync_queues--; in cfq_del_cfqq_rr()
2423 struct cfq_data *cfqd = cfqq->cfqd; in cfq_add_rq_rb() local
2431 cfq_add_cfqq_rr(cfqd, cfqq); in cfq_add_rq_rb()
2437 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); in cfq_add_rq_rb()
2443 cfq_prio_tree_add(cfqd, cfqq); in cfq_add_rq_rb()
2454 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, in cfq_reposition_rq_rb()
2459 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) in cfq_find_rq_fmerge() argument
2465 cic = cfq_cic_lookup(cfqd, tsk->io_context); in cfq_find_rq_fmerge()
2478 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_activate_request() local
2480 cfqd->rq_in_driver++; in cfq_activate_request()
2481 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", in cfq_activate_request()
2482 cfqd->rq_in_driver); in cfq_activate_request()
2484 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); in cfq_activate_request()
2489 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_deactivate_request() local
2491 WARN_ON(!cfqd->rq_in_driver); in cfq_deactivate_request()
2492 cfqd->rq_in_driver--; in cfq_deactivate_request()
2493 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", in cfq_deactivate_request()
2494 cfqd->rq_in_driver); in cfq_deactivate_request()
2502 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); in cfq_remove_request()
2507 cfqq->cfqd->rq_queued--; in cfq_remove_request()
2518 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_merge() local
2521 __rq = cfq_find_rq_fmerge(cfqd, bio); in cfq_merge()
2551 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_merged_requests() local
2575 cfqq != cfqd->active_queue) in cfq_merged_requests()
2576 cfq_del_cfqq_rr(cfqd, cfqq); in cfq_merged_requests()
2582 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_allow_bio_merge() local
2597 cic = cfq_cic_lookup(cfqd, current->io_context); in cfq_allow_bio_merge()
2611 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_del_timer() argument
2613 hrtimer_try_to_cancel(&cfqd->idle_slice_timer); in cfq_del_timer()
2617 static void __cfq_set_active_queue(struct cfq_data *cfqd, in __cfq_set_active_queue() argument
2621 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", in __cfq_set_active_queue()
2622 cfqd->serving_wl_class, cfqd->serving_wl_type); in __cfq_set_active_queue()
2637 cfq_del_timer(cfqd, cfqq); in __cfq_set_active_queue()
2640 cfqd->active_queue = cfqq; in __cfq_set_active_queue()
2647 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, in __cfq_slice_expired() argument
2650 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); in __cfq_slice_expired()
2653 cfq_del_timer(cfqd, cfqq); in __cfq_slice_expired()
2672 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); in __cfq_slice_expired()
2675 cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid); in __cfq_slice_expired()
2678 cfq_group_served(cfqd, cfqq->cfqg, cfqq); in __cfq_slice_expired()
2681 cfq_del_cfqq_rr(cfqd, cfqq); in __cfq_slice_expired()
2683 cfq_resort_rr_list(cfqd, cfqq); in __cfq_slice_expired()
2685 if (cfqq == cfqd->active_queue) in __cfq_slice_expired()
2686 cfqd->active_queue = NULL; in __cfq_slice_expired()
2688 if (cfqd->active_cic) { in __cfq_slice_expired()
2689 put_io_context(cfqd->active_cic->icq.ioc); in __cfq_slice_expired()
2690 cfqd->active_cic = NULL; in __cfq_slice_expired()
2694 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) in cfq_slice_expired() argument
2696 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_slice_expired()
2699 __cfq_slice_expired(cfqd, cfqq, timed_out); in cfq_slice_expired()
2706 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) in cfq_get_next_queue() argument
2708 struct cfq_rb_root *st = st_for(cfqd->serving_group, in cfq_get_next_queue()
2709 cfqd->serving_wl_class, cfqd->serving_wl_type); in cfq_get_next_queue()
2711 if (!cfqd->rq_queued) in cfq_get_next_queue()
2722 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) in cfq_get_next_queue_forced() argument
2729 if (!cfqd->rq_queued) in cfq_get_next_queue_forced()
2732 cfqg = cfq_get_next_cfqg(cfqd); in cfq_get_next_queue_forced()
2747 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, in cfq_set_active_queue() argument
2751 cfqq = cfq_get_next_queue(cfqd); in cfq_set_active_queue()
2753 __cfq_set_active_queue(cfqd, cfqq); in cfq_set_active_queue()
2757 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, in cfq_dist_from_last() argument
2760 if (blk_rq_pos(rq) >= cfqd->last_position) in cfq_dist_from_last()
2761 return blk_rq_pos(rq) - cfqd->last_position; in cfq_dist_from_last()
2763 return cfqd->last_position - blk_rq_pos(rq); in cfq_dist_from_last()
2766 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_rq_close() argument
2769 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; in cfq_rq_close()
2772 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, in cfqq_close() argument
2775 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; in cfqq_close()
2778 sector_t sector = cfqd->last_position; in cfqq_close()
2787 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); in cfqq_close()
2796 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) in cfqq_close()
2807 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) in cfqq_close()
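cfqq_close() searches the per-priority tree, keyed by each queue's next request sector, for a neighbor whose next request lands within CFQQ_CLOSE_THR of the current head position (the cfq_rq_close() test above); it probes the node the lookup returns and, failing that, the next one over. The same search modeled over a sorted array rather than the kernel's rbtree, purely for brevity:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t sector_t;
#define CFQQ_CLOSE_THR	(sector_t)(8 * 1024)	/* sectors, as in CFQ */

static sector_t dist(sector_t a, sector_t b)
{
	return a >= b ? a - b : b - a;
}

/* Index of the per-queue next-request position closest to `last` in a
 * sorted array; -1 unless it is within the close threshold, mirroring
 * cfqq_close()'s accept/reject rule. */
static ptrdiff_t closest_queue(const sector_t *pos, size_t n, sector_t last)
{
	size_t lo = 0, hi = n;
	ptrdiff_t best = -1;

	while (lo < hi) {			/* lower_bound(last) */
		size_t mid = lo + (hi - lo) / 2;

		if (pos[mid] < last)
			lo = mid + 1;
		else
			hi = mid;
	}
	/* candidates: first element >= last, and its predecessor */
	if (lo < n && dist(pos[lo], last) <= CFQQ_CLOSE_THR)
		best = (ptrdiff_t)lo;
	if (lo > 0 && dist(pos[lo - 1], last) <= CFQQ_CLOSE_THR &&
	    (best < 0 || dist(pos[lo - 1], last) < dist(pos[(size_t)best], last)))
		best = (ptrdiff_t)(lo - 1);
	return best;
}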
2823 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, in cfq_close_cooperator() argument
2846 cfqq = cfqq_close(cfqd, cur_cfqq); in cfq_close_cooperator()
2875 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_should_idle() argument
2883 if (!cfqd->cfq_slice_idle) in cfq_should_idle()
2892 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) in cfq_should_idle()
2900 !cfq_io_thinktime_big(cfqd, &st->ttime, false)) in cfq_should_idle()
2902 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count); in cfq_should_idle()
2906 static void cfq_arm_slice_timer(struct cfq_data *cfqd) in cfq_arm_slice_timer() argument
2908 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_arm_slice_timer()
2919 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag && in cfq_arm_slice_timer()
2920 !cfqd->cfq_group_idle) in cfq_arm_slice_timer()
2929 if (!cfq_should_idle(cfqd, cfqq)) { in cfq_arm_slice_timer()
2931 if (cfqd->cfq_group_idle) in cfq_arm_slice_timer()
2932 group_idle = cfqd->cfq_group_idle; in cfq_arm_slice_timer()
2946 cic = cfqd->active_cic; in cfq_arm_slice_timer()
2957 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu", in cfq_arm_slice_timer()
2968 cfq_io_thinktime_big(cfqd, &st->ttime, true))) in cfq_arm_slice_timer()
2974 sl = cfqd->cfq_group_idle; in cfq_arm_slice_timer()
2976 sl = cfqd->cfq_slice_idle; in cfq_arm_slice_timer()
2978 hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl), in cfq_arm_slice_timer()
2981 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl, in cfq_arm_slice_timer()
2990 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_dispatch_insert() local
2993 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); in cfq_dispatch_insert()
2995 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); in cfq_dispatch_insert()
3001 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; in cfq_dispatch_insert()
3028 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_prio_to_maxrq() argument
3030 const int base_rq = cfqd->cfq_slice_async_rq; in cfq_prio_to_maxrq()
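cfq_prio_to_maxrq() caps how many requests a queue may dispatch in one slice: twice cfq_slice_async_rq, weighted by how far the ioprio sits above the worst best-effort level, so priority 0 gets eight times the allowance of priority 7. A trimmed model:

#include <stdint.h>

#define IOPRIO_BE_NR	8	/* best-effort priorities 0..7 */

/* Max requests per slice: 2 * base, weighted by priority. */
static int cfq_prio_to_maxrq(int cfq_slice_async_rq, unsigned short ioprio)
{
	const int base_rq = cfq_slice_async_rq;

	return 2 * base_rq * (IOPRIO_BE_NR - ioprio);
}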
3092 static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd, in cfq_choose_wl_type() argument
3116 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) in choose_wl_class_and_type() argument
3122 enum wl_class_t original_class = cfqd->serving_wl_class; in choose_wl_class_and_type()
3126 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
3127 cfqd->serving_wl_class = RT_WORKLOAD; in choose_wl_class_and_type()
3128 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) in choose_wl_class_and_type()
3129 cfqd->serving_wl_class = BE_WORKLOAD; in choose_wl_class_and_type()
3131 cfqd->serving_wl_class = IDLE_WORKLOAD; in choose_wl_class_and_type()
3132 cfqd->workload_expires = now + jiffies_to_nsecs(1); in choose_wl_class_and_type()
3136 if (original_class != cfqd->serving_wl_class) in choose_wl_class_and_type()
3144 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
3150 if (count && !(now > cfqd->workload_expires)) in choose_wl_class_and_type()
3155 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, in choose_wl_class_and_type()
3156 cfqd->serving_wl_class); in choose_wl_class_and_type()
3157 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); in choose_wl_class_and_type()
3165 group_slice = cfq_group_slice(cfqd, cfqg); in choose_wl_class_and_type()
3168 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], in choose_wl_class_and_type()
3169 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, in choose_wl_class_and_type()
3172 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { in choose_wl_class_and_type()
3182 tmp = cfqd->cfq_target_latency * in choose_wl_class_and_type()
3183 cfqg_busy_async_queues(cfqd, cfqg); in choose_wl_class_and_type()
3184 tmp = div_u64(tmp, cfqd->busy_queues); in choose_wl_class_and_type()
3189 slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]); in choose_wl_class_and_type()
3192 slice = max(slice, 2 * cfqd->cfq_slice_idle); in choose_wl_class_and_type()
3195 cfq_log(cfqd, "workload slice:%llu", slice); in choose_wl_class_and_type()
3196 cfqd->workload_expires = now + slice; in choose_wl_class_and_type()
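The tail of choose_wl_class_and_type() sizes the workload window: the group slice is split in proportion to the busy queues of the chosen class, an async window is additionally capped by the async share of cfq_target_latency and shrunk by the async/sync slice ratio, and a sync window is floored at two idle periods. A sketch of that arithmetic with the cfqd/cfqg fields passed as parameters (times in nanoseconds; busy_in_class is nonzero once the class has been chosen):

#include <stdint.h>
#include <stdbool.h>

/* Size the time window for the workload type just chosen, following
 * the proportional-split rules of choose_wl_class_and_type(). */
static uint64_t workload_slice(uint64_t group_slice, unsigned count,
			       unsigned busy_in_class, bool async,
			       unsigned busy_async, unsigned busy_total,
			       uint64_t target_latency,
			       uint64_t slice_async, uint64_t slice_sync,
			       uint64_t slice_idle)
{
	/* share of the group slice proportional to queues in this class */
	uint64_t slice = group_slice * count / busy_in_class;

	if (async) {
		/* cap by the async share of the target latency, then
		 * shrink by the async/sync slice ratio */
		uint64_t tmp = target_latency * busy_async / busy_total;

		if (tmp < slice)
			slice = tmp;
		slice = slice * slice_async / slice_sync;
	} else {
		/* sync workloads always get room for two idle windows */
		if (slice < 2 * slice_idle)
			slice = 2 * slice_idle;
	}
	return slice;
}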
3199 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) in cfq_get_next_cfqg() argument
3201 struct cfq_rb_root *st = &cfqd->grp_service_tree; in cfq_get_next_cfqg()
3211 static void cfq_choose_cfqg(struct cfq_data *cfqd) in cfq_choose_cfqg() argument
3213 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); in cfq_choose_cfqg()
3216 cfqd->serving_group = cfqg; in cfq_choose_cfqg()
3220 cfqd->workload_expires = now + cfqg->saved_wl_slice; in cfq_choose_cfqg()
3221 cfqd->serving_wl_type = cfqg->saved_wl_type; in cfq_choose_cfqg()
3222 cfqd->serving_wl_class = cfqg->saved_wl_class; in cfq_choose_cfqg()
3224 cfqd->workload_expires = now - 1; in cfq_choose_cfqg()
3226 choose_wl_class_and_type(cfqd, cfqg); in cfq_choose_cfqg()
3233 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) in cfq_select_queue() argument
3238 cfqq = cfqd->active_queue; in cfq_select_queue()
3242 if (!cfqd->rq_queued) in cfq_select_queue()
3265 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { in cfq_select_queue()
3285 new_cfqq = cfq_close_cooperator(cfqd, cfqq); in cfq_select_queue()
3297 if (hrtimer_active(&cfqd->idle_slice_timer)) { in cfq_select_queue()
3313 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { in cfq_select_queue()
3323 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && in cfq_select_queue()
3325 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { in cfq_select_queue()
3331 cfq_slice_expired(cfqd, 0); in cfq_select_queue()
3338 cfq_choose_cfqg(cfqd); in cfq_select_queue()
3340 cfqq = cfq_set_active_queue(cfqd, new_cfqq); in cfq_select_queue()
3350 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); in __cfq_forced_dispatch_cfqq()
3357 __cfq_slice_expired(cfqq->cfqd, cfqq, 0); in __cfq_forced_dispatch_cfqq()
3365 static int cfq_forced_dispatch(struct cfq_data *cfqd) in cfq_forced_dispatch() argument
3371 cfq_slice_expired(cfqd, 0); in cfq_forced_dispatch()
3372 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { in cfq_forced_dispatch()
3373 __cfq_set_active_queue(cfqd, cfqq); in cfq_forced_dispatch()
3377 BUG_ON(cfqd->busy_queues); in cfq_forced_dispatch()
3379 cfq_log(cfqd, "forced_dispatch=%d", dispatched); in cfq_forced_dispatch()
3383 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, in cfq_slice_used_soon() argument
3391 if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end) in cfq_slice_used_soon()
3397 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_may_dispatch() argument
3407 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC]) in cfq_may_dispatch()
3413 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)) in cfq_may_dispatch()
3416 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1); in cfq_may_dispatch()
3438 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) in cfq_may_dispatch()
3444 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) && in cfq_may_dispatch()
3451 if (cfqd->busy_queues == 1 || promote_sync) in cfq_may_dispatch()
3460 max_dispatch = cfqd->cfq_quantum; in cfq_may_dispatch()
3468 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { in cfq_may_dispatch()
3469 u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync; in cfq_may_dispatch()
3472 depth = div64_u64(last_sync, cfqd->cfq_slice[1]); in cfq_may_dispatch()
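With cfq_latency set, this branch of cfq_may_dispatch() throttles async dispatch by how recently sync I/O was delayed: the allowed depth is one request per full sync slice elapsed since last_delayed_sync, never zero for a queue with nothing in flight, and never above the quantum-derived max_dispatch. A sketch of the depth computation:

#include <stdint.h>

/* Allowed async dispatch depth: one request per elapsed sync slice
 * since the last delayed sync completion, clamped to [1, max_dispatch]
 * (the floor of 1 applies only when the queue has nothing in flight). */
static unsigned async_depth(uint64_t now_ns, uint64_t last_delayed_sync_ns,
			    uint64_t sync_slice_ns, unsigned dispatched,
			    unsigned max_dispatch)
{
	uint64_t since = now_ns - last_delayed_sync_ns;
	unsigned depth = (unsigned)(since / sync_slice_ns);

	if (!depth && !dispatched)
		depth = 1;
	if (depth > max_dispatch)
		depth = max_dispatch;
	return depth;
}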
3489 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_dispatch_request() argument
3499 if (!cfq_may_dispatch(cfqd, cfqq)) in cfq_dispatch_request()
3508 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); in cfq_dispatch_request()
3513 cfq_dispatch_insert(cfqd->queue, rq); in cfq_dispatch_request()
3515 if (!cfqd->active_cic) { in cfq_dispatch_request()
3519 cfqd->active_cic = cic; in cfq_dispatch_request()
3531 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_dispatch_requests() local
3534 if (!cfqd->busy_queues) in cfq_dispatch_requests()
3538 return cfq_forced_dispatch(cfqd); in cfq_dispatch_requests()
3540 cfqq = cfq_select_queue(cfqd); in cfq_dispatch_requests()
3547 if (!cfq_dispatch_request(cfqd, cfqq)) in cfq_dispatch_requests()
3557 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && in cfq_dispatch_requests()
3558 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || in cfq_dispatch_requests()
3561 cfq_slice_expired(cfqd, 0); in cfq_dispatch_requests()
3564 cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); in cfq_dispatch_requests()
3577 struct cfq_data *cfqd = cfqq->cfqd; in cfq_put_queue() local
3586 cfq_log_cfqq(cfqd, cfqq, "put_queue"); in cfq_put_queue()
3591 if (unlikely(cfqd->active_queue == cfqq)) { in cfq_put_queue()
3592 __cfq_slice_expired(cfqd, cfqq, 0); in cfq_put_queue()
3593 cfq_schedule_dispatch(cfqd); in cfq_put_queue()
3622 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_exit_cfqq() argument
3624 if (unlikely(cfqq == cfqd->active_queue)) { in cfq_exit_cfqq()
3625 __cfq_slice_expired(cfqd, cfqq, 0); in cfq_exit_cfqq()
3626 cfq_schedule_dispatch(cfqd); in cfq_exit_cfqq()
3644 struct cfq_data *cfqd = cic_to_cfqd(cic); in cfq_exit_icq() local
3647 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false)); in cfq_exit_icq()
3652 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true)); in cfq_exit_icq()
3704 struct cfq_data *cfqd = cic_to_cfqd(cic); in check_ioprio_changed() local
3711 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio)) in check_ioprio_changed()
3717 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio); in check_ioprio_changed()
3728 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_init_cfqq() argument
3736 cfqq->cfqd = cfqd; in cfq_init_cfqq()
3751 struct cfq_data *cfqd = cic_to_cfqd(cic); in check_blkcg_changed() local
3763 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr)) in check_blkcg_changed()
3772 cfq_log_cfqq(cfqd, cfqq, "changed cgroup"); in check_blkcg_changed()
3779 cfq_log_cfqq(cfqd, cfqq, "changed cgroup"); in check_blkcg_changed()
3811 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, in cfq_get_queue() argument
3821 cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio)); in cfq_get_queue()
3823 cfqq = &cfqd->oom_cfqq; in cfq_get_queue()
3841 cfqd->queue->node); in cfq_get_queue()
3843 cfqq = &cfqd->oom_cfqq; in cfq_get_queue()
3849 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); in cfq_get_queue()
3852 cfq_log_cfqq(cfqd, cfqq, "alloced"); in cfq_get_queue()
3878 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_io_thinktime() argument
3882 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle); in cfq_update_io_thinktime()
3884 cfqd->cfq_slice_idle); in cfq_update_io_thinktime()
3887 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); in cfq_update_io_thinktime()
3892 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_io_seektime() argument
3905 if (blk_queue_nonrot(cfqd->queue)) in cfq_update_io_seektime()
3922 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_update_idle_window() argument
3941 !cfqd->cfq_slice_idle || in cfq_update_idle_window()
3945 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle) in cfq_update_idle_window()
3952 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); in cfq_update_idle_window()
3965 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, in cfq_should_preempt() argument
3970 cfqq = cfqd->active_queue; in cfq_should_preempt()
4012 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD && in cfq_should_preempt()
4025 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq)) in cfq_should_preempt()
4028 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) in cfq_should_preempt()
4035 if (cfq_rq_close(cfqd, cfqq, rq)) in cfq_should_preempt()
4045 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_preempt_queue() argument
4047 enum wl_type_t old_type = cfqq_type(cfqd->active_queue); in cfq_preempt_queue()
4049 cfq_log_cfqq(cfqd, cfqq, "preempt"); in cfq_preempt_queue()
4050 cfq_slice_expired(cfqd, 1); in cfq_preempt_queue()
4065 cfq_service_tree_add(cfqd, cfqq, 1); in cfq_preempt_queue()
4076 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, in cfq_rq_enqueued() argument
4081 cfqd->rq_queued++; in cfq_rq_enqueued()
4085 cfq_update_io_thinktime(cfqd, cfqq, cic); in cfq_rq_enqueued()
4086 cfq_update_io_seektime(cfqd, cfqq, rq); in cfq_rq_enqueued()
4087 cfq_update_idle_window(cfqd, cfqq, cic); in cfq_rq_enqueued()
4091 if (cfqq == cfqd->active_queue) { in cfq_rq_enqueued()
4104 cfqd->busy_queues > 1) { in cfq_rq_enqueued()
4105 cfq_del_timer(cfqd, cfqq); in cfq_rq_enqueued()
4107 __blk_run_queue(cfqd->queue); in cfq_rq_enqueued()
4113 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { in cfq_rq_enqueued()
4120 cfq_preempt_queue(cfqd, cfqq); in cfq_rq_enqueued()
4121 __blk_run_queue(cfqd->queue); in cfq_rq_enqueued()
4127 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_insert_request() local
4130 cfq_log_cfqq(cfqd, cfqq, "insert_request"); in cfq_insert_request()
4133 rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; in cfq_insert_request()
4136 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, in cfq_insert_request()
4138 cfq_rq_enqueued(cfqd, cfqq, rq); in cfq_insert_request()
4145 static void cfq_update_hw_tag(struct cfq_data *cfqd) in cfq_update_hw_tag() argument
4147 struct cfq_queue *cfqq = cfqd->active_queue; in cfq_update_hw_tag()
4149 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth) in cfq_update_hw_tag()
4150 cfqd->hw_tag_est_depth = cfqd->rq_in_driver; in cfq_update_hw_tag()
4152 if (cfqd->hw_tag == 1) in cfq_update_hw_tag()
4155 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && in cfq_update_hw_tag()
4156 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
4166 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
4169 if (cfqd->hw_tag_samples++ < 50) in cfq_update_hw_tag()
4172 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) in cfq_update_hw_tag()
4173 cfqd->hw_tag = 1; in cfq_update_hw_tag()
4175 cfqd->hw_tag = 0; in cfq_update_hw_tag()
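cfq_update_hw_tag() infers command queuing from behavior: it records the deepest rq_in_driver ever seen, discards samples taken while both the scheduler and driver queues were shallow (those prove nothing), and after 50 meaningful samples sets hw_tag from whether the depth ever reached CFQ_HW_QUEUE_MIN. A reduced model of the estimator, dropping the idle-window special case on the active queue:

#include <stdbool.h>

#define CFQ_HW_QUEUE_MIN 5	/* depth that proves hardware queuing */

struct hw_tag_est {
	int max_depth;		/* deepest in-driver count observed */
	int samples;		/* meaningful samples taken so far */
	int hw_tag;		/* -1 unknown, 0 no, 1 yes */
};

/* Feed one observation of the driver queue depth; queued is how many
 * requests the scheduler currently holds back. */
static void hw_tag_sample(struct hw_tag_est *e, int in_driver, int queued)
{
	if (in_driver > e->max_depth)
		e->max_depth = in_driver;

	if (e->hw_tag == 1)
		return;
	/* shallow on both sides: the sample proves nothing, skip it */
	if (queued <= CFQ_HW_QUEUE_MIN && in_driver <= CFQ_HW_QUEUE_MIN)
		return;
	if (e->samples++ < 50)
		return;

	e->hw_tag = (e->max_depth >= CFQ_HW_QUEUE_MIN) ? 1 : 0;
}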
4178 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) in cfq_should_wait_busy() argument
4180 struct cfq_io_cq *cic = cfqd->active_cic; in cfq_should_wait_busy()
4192 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) in cfq_should_wait_busy()
4219 struct cfq_data *cfqd = cfqq->cfqd; in cfq_completed_request() local
4223 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq)); in cfq_completed_request()
4225 cfq_update_hw_tag(cfqd); in cfq_completed_request()
4227 WARN_ON(!cfqd->rq_in_driver); in cfq_completed_request()
4229 cfqd->rq_in_driver--; in cfq_completed_request()
4235 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; in cfq_completed_request()
4249 if (rq->start_time_ns + cfqd->cfq_fifo_expire[1] <= now) in cfq_completed_request()
4250 cfqd->last_delayed_sync = now; in cfq_completed_request()
4261 if (cfqd->active_queue == cfqq) { in cfq_completed_request()
4265 cfq_set_prio_slice(cfqd, cfqq); in cfq_completed_request()
4273 if (cfq_should_wait_busy(cfqd, cfqq)) { in cfq_completed_request()
4274 u64 extend_sl = cfqd->cfq_slice_idle; in cfq_completed_request()
4275 if (!cfqd->cfq_slice_idle) in cfq_completed_request()
4276 extend_sl = cfqd->cfq_group_idle; in cfq_completed_request()
4279 cfq_log_cfqq(cfqd, cfqq, "will busy wait"); in cfq_completed_request()
4291 cfq_slice_expired(cfqd, 1); in cfq_completed_request()
4293 !cfq_close_cooperator(cfqd, cfqq)) { in cfq_completed_request()
4294 cfq_arm_slice_timer(cfqd); in cfq_completed_request()
4298 if (!cfqd->rq_in_driver) in cfq_completed_request()
4299 cfq_schedule_dispatch(cfqd); in cfq_completed_request()
4332 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_may_queue() local
4343 cic = cfq_cic_lookup(cfqd, tsk->io_context); in cfq_may_queue()
4381 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic, in cfq_merge_cfqqs() argument
4384 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); in cfq_merge_cfqqs()
4419 struct cfq_data *cfqd = q->elevator->elevator_data; in cfq_set_request() local
4431 if (!cfqq || cfqq == &cfqd->oom_cfqq) { in cfq_set_request()
4434 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio); in cfq_set_request()
4441 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); in cfq_set_request()
4454 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); in cfq_set_request()
4470 struct cfq_data *cfqd = in cfq_kick_queue() local
4472 struct request_queue *q = cfqd->queue; in cfq_kick_queue()
4475 __blk_run_queue(cfqd->queue); in cfq_kick_queue()
4484 struct cfq_data *cfqd = container_of(timer, struct cfq_data, in cfq_idle_slice_timer() local
4490 cfq_log(cfqd, "idle timer fired"); in cfq_idle_slice_timer()
4492 spin_lock_irqsave(cfqd->queue->queue_lock, flags); in cfq_idle_slice_timer()
4494 cfqq = cfqd->active_queue; in cfq_idle_slice_timer()
4514 if (!cfqd->busy_queues) in cfq_idle_slice_timer()
4529 cfq_slice_expired(cfqd, timed_out); in cfq_idle_slice_timer()
4531 cfq_schedule_dispatch(cfqd); in cfq_idle_slice_timer()
4533 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); in cfq_idle_slice_timer()
4537 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) in cfq_shutdown_timer_wq() argument
4539 hrtimer_cancel(&cfqd->idle_slice_timer); in cfq_shutdown_timer_wq()
4540 cancel_work_sync(&cfqd->unplug_work); in cfq_shutdown_timer_wq()
4545 struct cfq_data *cfqd = e->elevator_data; in cfq_exit_queue() local
4546 struct request_queue *q = cfqd->queue; in cfq_exit_queue()
4548 cfq_shutdown_timer_wq(cfqd); in cfq_exit_queue()
4552 if (cfqd->active_queue) in cfq_exit_queue()
4553 __cfq_slice_expired(cfqd, cfqd->active_queue, 0); in cfq_exit_queue()
4557 cfq_shutdown_timer_wq(cfqd); in cfq_exit_queue()
4562 kfree(cfqd->root_group); in cfq_exit_queue()
4564 kfree(cfqd); in cfq_exit_queue()
4569 struct cfq_data *cfqd; in cfq_init_queue() local
4578 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); in cfq_init_queue()
4579 if (!cfqd) { in cfq_init_queue()
4583 eq->elevator_data = cfqd; in cfq_init_queue()
4585 cfqd->queue = q; in cfq_init_queue()
4591 cfqd->grp_service_tree = CFQ_RB_ROOT; in cfq_init_queue()
4599 cfqd->root_group = blkg_to_cfqg(q->root_blkg); in cfq_init_queue()
4602 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group), in cfq_init_queue()
4603 GFP_KERNEL, cfqd->queue->node); in cfq_init_queue()
4604 if (!cfqd->root_group) in cfq_init_queue()
4607 cfq_init_cfqg_base(cfqd->root_group); in cfq_init_queue()
4608 cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL; in cfq_init_queue()
4609 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL; in cfq_init_queue()
4618 cfqd->prio_trees[i] = RB_ROOT; in cfq_init_queue()
4627 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); in cfq_init_queue()
4628 cfqd->oom_cfqq.ref++; in cfq_init_queue()
4631 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group); in cfq_init_queue()
4632 cfqg_put(cfqd->root_group); in cfq_init_queue()
4635 hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC, in cfq_init_queue()
4637 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; in cfq_init_queue()
4639 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); in cfq_init_queue()
4641 cfqd->cfq_quantum = cfq_quantum; in cfq_init_queue()
4642 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; in cfq_init_queue()
4643 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; in cfq_init_queue()
4644 cfqd->cfq_back_max = cfq_back_max; in cfq_init_queue()
4645 cfqd->cfq_back_penalty = cfq_back_penalty; in cfq_init_queue()
4646 cfqd->cfq_slice[0] = cfq_slice_async; in cfq_init_queue()
4647 cfqd->cfq_slice[1] = cfq_slice_sync; in cfq_init_queue()
4648 cfqd->cfq_target_latency = cfq_target_latency; in cfq_init_queue()
4649 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; in cfq_init_queue()
4650 cfqd->cfq_slice_idle = cfq_slice_idle; in cfq_init_queue()
4651 cfqd->cfq_group_idle = cfq_group_idle; in cfq_init_queue()
4652 cfqd->cfq_latency = 1; in cfq_init_queue()
4653 cfqd->hw_tag = -1; in cfq_init_queue()
4658 cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC; in cfq_init_queue()
4662 kfree(cfqd); in cfq_init_queue()
4670 struct cfq_data *cfqd = e->elevator_data; in cfq_registered_queue() local
4676 cfqd->cfq_slice_idle = 0; in cfq_registered_queue()
4700 struct cfq_data *cfqd = e->elevator_data; \
4706 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4707 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4708 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4709 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4710 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4711 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4712 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4713 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4714 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4715 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4716 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4717 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4723 struct cfq_data *cfqd = e->elevator_data; \
4728 USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
4729 USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
4730 USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
4731 USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
4732 USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
4738 struct cfq_data *cfqd = e->elevator_data; \
4752 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4753 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4755 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4757 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4758 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4760 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4761 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4762 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4763 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4764 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4766 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4767 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4773 struct cfq_data *cfqd = e->elevator_data; \
4784 USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
4785 USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
4786 USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
4787 USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
4788 USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
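The SHOW_FUNCTION/STORE_FUNCTION families stamp out one sysfs accessor per tunable: the trailing flag selects nanosecond-to-millisecond conversion (the _us variants convert to microseconds), and stores clamp to the stated bounds before writing the field. A user-space sketch of the same macro-generation pattern, with snprintf()/sscanf() standing in for the elevator sysfs helpers (an assumption for illustration):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

struct cfq_data { uint64_t cfq_slice_idle; unsigned cfq_quantum; };

/* Generate a "show" accessor: read the field, optionally convert
 * ns -> ms, print it into the caller's buffer. */
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static int __FUNC(struct cfq_data *cfqd, char *page, size_t len)	\
{									\
	uint64_t __data = (__VAR);					\
	if (__CONV)							\
		__data /= NSEC_PER_MSEC;				\
	return snprintf(page, len, "%llu\n",				\
			(unsigned long long)__data);			\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0)
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1)

/* Generate a "store" accessor: parse, clamp to [MIN, MAX], optionally
 * convert ms -> ns, write the field. */
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static int __FUNC(struct cfq_data *cfqd, const char *page)		\
{									\
	unsigned long long __data;					\
	if (sscanf(page, "%llu", &__data) != 1)				\
		return -1;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		__data *= NSEC_PER_MSEC;				\
	*(__PTR) = __data;						\
	return 0;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT32_MAX, 0)
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT32_MAX, 1)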