Lines matching refs: bfqd

394 static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,  in bfq_bic_lookup()  argument
416 void bfq_schedule_dispatch(struct bfq_data *bfqd) in bfq_schedule_dispatch() argument
418 if (bfqd->queued != 0) { in bfq_schedule_dispatch()
419 bfq_log(bfqd, "schedule dispatch"); in bfq_schedule_dispatch()
420 blk_mq_run_hw_queues(bfqd->queue, true); in bfq_schedule_dispatch()
434 static struct request *bfq_choose_req(struct bfq_data *bfqd, in bfq_choose_req() argument
465 back_max = bfqd->bfq_back_max * 2; in bfq_choose_req()
475 d1 = (last - s1) * bfqd->bfq_back_penalty; in bfq_choose_req()
482 d2 = (last - s2) * bfqd->bfq_back_penalty; in bfq_choose_req()
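
The bfq_choose_req() lines above compare two candidate requests against the last served sector: a forward gap counts at face value, while a backward gap is tolerated only within twice bfq_back_max and is inflated by bfq_back_penalty. Below is a minimal userspace sketch of that distance metric under those assumptions; req_distance() and struct bfq_params are hypothetical names, not kernel symbols.

#include <stdint.h>
#include <stdio.h>

#define DIST_MAX UINT64_MAX	/* stands in for "never choose this request" */

struct bfq_params {
	uint64_t bfq_back_max;     /* tolerated backward seek, in sectors */
	uint64_t bfq_back_penalty; /* multiplier applied to backward distances */
};

/* effective distance from the last served sector to a candidate request */
static uint64_t req_distance(const struct bfq_params *p, uint64_t last, uint64_t s)
{
	uint64_t back_max = p->bfq_back_max * 2;	/* as in the listing */

	if (s >= last)				/* forward seek */
		return s - last;
	if (last - s <= back_max)		/* short backward seek: penalized */
		return (last - s) * p->bfq_back_penalty;
	return DIST_MAX;			/* long backward seek: avoid */
}

int main(void)
{
	struct bfq_params p = { .bfq_back_max = 16384, .bfq_back_penalty = 2 };
	uint64_t last = 100000;

	/* bfq_choose_req() would prefer the candidate with the smaller distance */
	printf("forward by 800 sectors : %llu\n",
	       (unsigned long long)req_distance(&p, last, 100800));
	printf("backward by 200 sectors: %llu\n",
	       (unsigned long long)req_distance(&p, last, 99800));
	return 0;
}
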
532 struct bfq_data *bfqd = data->q->elevator->elevator_data; in bfq_limit_depth() local
538 bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)]; in bfq_limit_depth()
540 bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u", in bfq_limit_depth()
541 __func__, bfqd->wr_busy_queues, op_is_sync(op), in bfq_limit_depth()
546 bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, in bfq_rq_pos_tree_lookup() argument
579 bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", in bfq_rq_pos_tree_lookup()
593 void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_pos_tree_add_move() argument
617 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, in bfq_pos_tree_add_move()
629 static bool bfq_differentiated_weights(struct bfq_data *bfqd) in bfq_differentiated_weights() argument
635 return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && in bfq_differentiated_weights()
636 (bfqd->queue_weights_tree.rb_node->rb_left || in bfq_differentiated_weights()
637 bfqd->queue_weights_tree.rb_node->rb_right) in bfq_differentiated_weights()
640 (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) && in bfq_differentiated_weights()
641 (bfqd->group_weights_tree.rb_node->rb_left || in bfq_differentiated_weights()
642 bfqd->group_weights_tree.rb_node->rb_right) in bfq_differentiated_weights()
672 static bool bfq_symmetric_scenario(struct bfq_data *bfqd) in bfq_symmetric_scenario() argument
674 return !bfq_differentiated_weights(bfqd); in bfq_symmetric_scenario()
690 void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity, in bfq_weights_tree_add() argument
759 void __bfq_weights_tree_remove(struct bfq_data *bfqd, in __bfq_weights_tree_remove() argument
781 void bfq_weights_tree_remove(struct bfq_data *bfqd, in bfq_weights_tree_remove() argument
786 __bfq_weights_tree_remove(bfqd, &bfqq->entity, in bfq_weights_tree_remove()
787 &bfqd->queue_weights_tree); in bfq_weights_tree_remove()
809 __bfq_weights_tree_remove(bfqd, entity, in bfq_weights_tree_remove()
810 &bfqd->group_weights_tree); in bfq_weights_tree_remove()
832 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); in bfq_check_fifo()
836 static struct request *bfq_find_next_rq(struct bfq_data *bfqd, in bfq_find_next_rq() argument
860 return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); in bfq_find_next_rq()
884 static void bfq_updated_next_req(struct bfq_data *bfqd, in bfq_updated_next_req() argument
894 if (bfqq == bfqd->in_service_queue) in bfq_updated_next_req()
905 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", in bfq_updated_next_req()
907 bfq_requeue_bfqq(bfqd, bfqq, false); in bfq_updated_next_req()
911 static unsigned int bfq_wr_duration(struct bfq_data *bfqd) in bfq_wr_duration() argument
915 if (bfqd->bfq_wr_max_time > 0) in bfq_wr_duration()
916 return bfqd->bfq_wr_max_time; in bfq_wr_duration()
918 dur = bfqd->rate_dur_prod; in bfq_wr_duration()
919 do_div(dur, bfqd->peak_rate); in bfq_wr_duration()
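
bfq_wr_duration() returns the fixed bfq_wr_max_time when the user set one, and otherwise derives the interactive weight-raising duration by dividing rate_dur_prod by the measured peak_rate. A simplified standalone sketch of that computation, with do_div() replaced by plain 64-bit division and field names reused from the listing; any clamping the kernel applies afterwards is not shown here.

#include <stdint.h>
#include <stdio.h>

struct wr_params {
	uint64_t rate_dur_prod;       /* reference rate * reference duration */
	uint32_t peak_rate;           /* estimated device peak rate */
	unsigned int bfq_wr_max_time; /* 0 means "compute from the rate" */
};

static unsigned int wr_duration(const struct wr_params *p)
{
	if (p->bfq_wr_max_time > 0)
		return p->bfq_wr_max_time;   /* user-forced duration */

	/* higher peak rate -> shorter weight-raising period */
	return (unsigned int)(p->rate_dur_prod / p->peak_rate);
}

int main(void)
{
	struct wr_params slow = { .rate_dur_prod = 1000000, .peak_rate = 100 };
	struct wr_params fast = { .rate_dur_prod = 1000000, .peak_rate = 1000 };

	printf("slow device: %u ticks\n", wr_duration(&slow));
	printf("fast device: %u ticks\n", wr_duration(&fast));
	return 0;
}
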
946 struct bfq_data *bfqd) in switch_back_to_interactive_wr() argument
948 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in switch_back_to_interactive_wr()
949 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in switch_back_to_interactive_wr()
954 bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, in bfq_bfqq_resume_state() argument
979 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in bfq_bfqq_resume_state()
982 bfq_wr_duration(bfqd))) { in bfq_bfqq_resume_state()
983 switch_back_to_interactive_wr(bfqq, bfqd); in bfq_bfqq_resume_state()
986 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_bfqq_resume_state()
998 bfqd->wr_busy_queues++; in bfq_bfqq_resume_state()
1000 bfqd->wr_busy_queues--; in bfq_bfqq_resume_state()
1009 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_reset_burst_list() argument
1014 hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) in bfq_reset_burst_list()
1016 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); in bfq_reset_burst_list()
1017 bfqd->burst_size = 1; in bfq_reset_burst_list()
1018 bfqd->burst_parent_entity = bfqq->entity.parent; in bfq_reset_burst_list()
1022 static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_add_to_burst() argument
1025 bfqd->burst_size++; in bfq_add_to_burst()
1027 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { in bfq_add_to_burst()
1035 bfqd->large_burst = true; in bfq_add_to_burst()
1041 hlist_for_each_entry(bfqq_item, &bfqd->burst_list, in bfq_add_to_burst()
1053 hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, in bfq_add_to_burst()
1062 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); in bfq_add_to_burst()
1168 static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_handle_burst() argument
1198 if (time_is_before_jiffies(bfqd->last_ins_in_burst + in bfq_handle_burst()
1199 bfqd->bfq_burst_interval) || in bfq_handle_burst()
1200 bfqq->entity.parent != bfqd->burst_parent_entity) { in bfq_handle_burst()
1201 bfqd->large_burst = false; in bfq_handle_burst()
1202 bfq_reset_burst_list(bfqd, bfqq); in bfq_handle_burst()
1211 if (bfqd->large_burst) { in bfq_handle_burst()
1221 bfq_add_to_burst(bfqd, bfqq); in bfq_handle_burst()
1231 bfqd->last_ins_in_burst = jiffies; in bfq_handle_burst()
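
The bfq_handle_burst()/bfq_add_to_burst()/bfq_reset_burst_list() lines above implement queue-burst detection: if a newly activated queue arrives more than bfq_burst_interval after the previous one, or under a different parent entity, the burst list is reset; otherwise the burst grows and, once burst_size reaches bfq_large_burst_thresh, the burst is flagged as large. A condensed userspace model of just that decision follows, tracking a counter instead of the hlist; handle_burst() is a hypothetical helper, not the kernel function.

#include <stdbool.h>
#include <stdio.h>

struct burst_state {
	unsigned long last_ins_in_burst;    /* time of last activation in burst */
	unsigned long bfq_burst_interval;   /* max gap between burst members */
	unsigned int burst_size;
	unsigned int bfq_large_burst_thresh;
	bool large_burst;
	const void *burst_parent_entity;    /* group the burst belongs to */
};

static void handle_burst(struct burst_state *s, unsigned long now,
			 const void *parent)
{
	if (now - s->last_ins_in_burst > s->bfq_burst_interval ||
	    parent != s->burst_parent_entity) {
		/* too late, or a different group: start a fresh burst */
		s->large_burst = false;
		s->burst_size = 1;
		s->burst_parent_entity = parent;
	} else if (!s->large_burst &&
		   ++s->burst_size >= s->bfq_large_burst_thresh) {
		/* enough closely spaced activations: flag the burst as large */
		s->large_burst = true;
	}
	s->last_ins_in_burst = now;
}

int main(void)
{
	struct burst_state s = {
		.bfq_burst_interval = 180, .bfq_large_burst_thresh = 8,
	};
	int grp = 0;

	for (unsigned long t = 0; t < 10; t++)
		handle_burst(&s, t, &grp);
	printf("burst_size=%u large=%d\n", s.burst_size, s.large_burst);
	return 0;
}
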
1246 static int bfq_max_budget(struct bfq_data *bfqd) in bfq_max_budget() argument
1248 if (bfqd->budgets_assigned < bfq_stats_min_budgets) in bfq_max_budget()
1251 return bfqd->bfq_max_budget; in bfq_max_budget()
1258 static int bfq_min_budget(struct bfq_data *bfqd) in bfq_min_budget() argument
1260 if (bfqd->budgets_assigned < bfq_stats_min_budgets) in bfq_min_budget()
1263 return bfqd->bfq_max_budget / 32; in bfq_min_budget()
1367 static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, in bfq_bfqq_update_budg_for_activation() argument
1429 static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, in bfq_update_bfqq_wr_on_rq_arrival() argument
1441 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_update_bfqq_wr_on_rq_arrival()
1442 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_update_bfqq_wr_on_rq_arrival()
1457 bfqq->wr_coeff = bfqd->bfq_wr_coeff * in bfq_update_bfqq_wr_on_rq_arrival()
1460 bfqd->bfq_wr_rt_max_time; in bfq_update_bfqq_wr_on_rq_arrival()
1474 2 * bfq_min_budget(bfqd)); in bfq_update_bfqq_wr_on_rq_arrival()
1477 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_update_bfqq_wr_on_rq_arrival()
1478 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_update_bfqq_wr_on_rq_arrival()
1512 bfqd->bfq_wr_rt_max_time) { in bfq_update_bfqq_wr_on_rq_arrival()
1517 bfqd->bfq_wr_rt_max_time; in bfq_update_bfqq_wr_on_rq_arrival()
1518 bfqq->wr_coeff = bfqd->bfq_wr_coeff * in bfq_update_bfqq_wr_on_rq_arrival()
1526 static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, in bfq_bfqq_idle_for_long_time() argument
1532 bfqd->bfq_wr_min_idle_time); in bfq_bfqq_idle_for_long_time()
1535 static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, in bfq_bfqq_handle_idle_busy_switch() argument
1543 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), in bfq_bfqq_handle_idle_busy_switch()
1551 bfqd->bfq_slice_idle * 3; in bfq_bfqq_handle_idle_busy_switch()
1562 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && in bfq_bfqq_handle_idle_busy_switch()
1567 wr_or_deserves_wr = bfqd->low_latency && in bfq_bfqq_handle_idle_busy_switch()
1577 bfq_bfqq_update_budg_for_activation(bfqd, bfqq, in bfq_bfqq_handle_idle_busy_switch()
1610 bfqd->bfq_requests_within_timer) in bfq_bfqq_handle_idle_busy_switch()
1616 if (bfqd->low_latency) { in bfq_bfqq_handle_idle_busy_switch()
1620 jiffies - bfqd->bfq_wr_min_idle_time - 1; in bfq_bfqq_handle_idle_busy_switch()
1623 bfqd->bfq_wr_min_idle_time)) { in bfq_bfqq_handle_idle_busy_switch()
1624 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, in bfq_bfqq_handle_idle_busy_switch()
1640 bfq_add_bfqq_busy(bfqd, bfqq); in bfq_bfqq_handle_idle_busy_switch()
1652 if (bfqd->in_service_queue && bfqq_wants_to_preempt && in bfq_bfqq_handle_idle_busy_switch()
1653 bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && in bfq_bfqq_handle_idle_busy_switch()
1654 next_queue_may_preempt(bfqd)) in bfq_bfqq_handle_idle_busy_switch()
1655 bfq_bfqq_expire(bfqd, bfqd->in_service_queue, in bfq_bfqq_handle_idle_busy_switch()
1662 struct bfq_data *bfqd = bfqq->bfqd; in bfq_add_request() local
1667 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); in bfq_add_request()
1669 bfqd->queued++; in bfq_add_request()
1677 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); in bfq_add_request()
1684 bfq_pos_tree_add_move(bfqd, bfqq); in bfq_add_request()
1687 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, in bfq_add_request()
1690 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && in bfq_add_request()
1693 bfqd->bfq_wr_min_inter_arr_async)) { in bfq_add_request()
1694 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_add_request()
1695 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_add_request()
1697 bfqd->wr_busy_queues++; in bfq_add_request()
1701 bfq_updated_next_req(bfqd, bfqq); in bfq_add_request()
1730 if (bfqd->low_latency && in bfq_add_request()
1735 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, in bfq_find_rq_fmerge() argument
1739 struct bfq_queue *bfqq = bfqd->bio_bfqq; in bfq_find_rq_fmerge()
1759 struct bfq_data *bfqd = q->elevator->elevator_data;
1761 bfqd->rq_in_driver++;
1766 struct bfq_data *bfqd = q->elevator->elevator_data;
1768 bfqd->rq_in_driver--;
1776 struct bfq_data *bfqd = bfqq->bfqd; in bfq_remove_request() local
1780 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); in bfq_remove_request()
1781 bfq_updated_next_req(bfqd, bfqq); in bfq_remove_request()
1787 bfqd->queued--; in bfq_remove_request()
1797 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { in bfq_remove_request()
1798 bfq_del_bfqq_busy(bfqd, bfqq, false); in bfq_remove_request()
1823 bfq_pos_tree_add_move(bfqd, bfqq); in bfq_remove_request()
1834 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_bio_merge() local
1843 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); in bfq_bio_merge()
1846 spin_lock_irq(&bfqd->lock); in bfq_bio_merge()
1849 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); in bfq_bio_merge()
1851 bfqd->bio_bfqq = NULL; in bfq_bio_merge()
1852 bfqd->bio_bic = bic; in bfq_bio_merge()
1858 spin_unlock_irq(&bfqd->lock); in bfq_bio_merge()
1866 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_request_merge() local
1869 __rq = bfq_find_rq_fmerge(bfqd, bio, q); in bfq_request_merge()
1889 struct bfq_data *bfqd = bfqq->bfqd; in bfq_request_merged() local
1898 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, in bfq_request_merged()
1899 bfqd->last_position); in bfq_request_merged()
1907 bfq_updated_next_req(bfqd, bfqq); in bfq_request_merged()
1908 bfq_pos_tree_add_move(bfqd, bfqq); in bfq_request_merged()
1960 bfqq->bfqd->wr_busy_queues--; in bfq_bfqq_end_wr()
1971 void bfq_end_wr_async_queues(struct bfq_data *bfqd, in bfq_end_wr_async_queues() argument
1984 static void bfq_end_wr(struct bfq_data *bfqd) in bfq_end_wr() argument
1988 spin_lock_irq(&bfqd->lock); in bfq_end_wr()
1990 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) in bfq_end_wr()
1992 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) in bfq_end_wr()
1994 bfq_end_wr_async(bfqd); in bfq_end_wr()
1996 spin_unlock_irq(&bfqd->lock); in bfq_end_wr()
2014 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, in bfqq_find_close() argument
2029 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); in bfqq_find_close()
2056 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, in bfq_find_close_cooperator() argument
2069 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); in bfq_find_close_cooperator()
2107 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", in bfq_setup_merge()
2185 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_setup_cooperator() argument
2207 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) in bfq_setup_cooperator()
2211 if (bfqd->busy_queues == 1) in bfq_setup_cooperator()
2214 in_service_bfqq = bfqd->in_service_queue; in bfq_setup_cooperator()
2217 likely(in_service_bfqq != &bfqd->oom_bfqq) && in bfq_setup_cooperator()
2218 bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && in bfq_setup_cooperator()
2230 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, in bfq_setup_cooperator()
2233 if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && in bfq_setup_cooperator()
2259 bfqq->bfqd->low_latency)) { in bfq_bfqq_save_state()
2269 bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; in bfq_bfqq_save_state()
2270 bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); in bfq_bfqq_save_state()
2282 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, in bfq_merge_bfqqs() argument
2285 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", in bfq_merge_bfqqs()
2310 bfqd->wr_busy_queues++; in bfq_merge_bfqqs()
2318 bfqd->wr_busy_queues--; in bfq_merge_bfqqs()
2321 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", in bfq_merge_bfqqs()
2322 bfqd->wr_busy_queues); in bfq_merge_bfqqs()
2348 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_allow_bio_merge() local
2350 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; in bfq_allow_bio_merge()
2369 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); in bfq_allow_bio_merge()
2378 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, in bfq_allow_bio_merge()
2393 bfqd->bio_bfqq = bfqq; in bfq_allow_bio_merge()
2405 static void bfq_set_budget_timeout(struct bfq_data *bfqd, in bfq_set_budget_timeout() argument
2410 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) in bfq_set_budget_timeout()
2415 bfqd->last_budget_start = ktime_get(); in bfq_set_budget_timeout()
2418 bfqd->bfq_timeout * timeout_coeff; in bfq_set_budget_timeout()
2421 static void __bfq_set_in_service_queue(struct bfq_data *bfqd, in __bfq_set_in_service_queue() argument
2427 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; in __bfq_set_in_service_queue()
2431 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in __bfq_set_in_service_queue()
2465 bfq_set_budget_timeout(bfqd, bfqq); in __bfq_set_in_service_queue()
2466 bfq_log_bfqq(bfqd, bfqq, in __bfq_set_in_service_queue()
2471 bfqd->in_service_queue = bfqq; in __bfq_set_in_service_queue()
2477 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) in bfq_set_in_service_queue() argument
2479 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); in bfq_set_in_service_queue()
2481 __bfq_set_in_service_queue(bfqd, bfqq); in bfq_set_in_service_queue()
2485 static void bfq_arm_slice_timer(struct bfq_data *bfqd) in bfq_arm_slice_timer() argument
2487 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_arm_slice_timer()
2497 sl = bfqd->bfq_slice_idle; in bfq_arm_slice_timer()
2509 bfq_symmetric_scenario(bfqd)) in bfq_arm_slice_timer()
2512 bfqd->last_idling_start = ktime_get(); in bfq_arm_slice_timer()
2513 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), in bfq_arm_slice_timer()
2525 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) in bfq_calc_max_budget() argument
2527 return (u64)bfqd->peak_rate * USEC_PER_MSEC * in bfq_calc_max_budget()
2528 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; in bfq_calc_max_budget()
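
bfq_calc_max_budget() sizes the default maximum budget as the amount of service the device can deliver, at the estimated peak rate, within one budget timeout: peak_rate (kept in an internal fixed-point unit, hence the BFQ_RATE_SHIFT shift) times the timeout converted to microseconds. A standalone numeric sketch under that assumed unit convention; the shift and the input values below are illustrative, not necessarily the kernel defaults.

#include <stdint.h>
#include <stdio.h>

#define BFQ_RATE_SHIFT 16       /* fixed-point shift assumed for rates */
#define USEC_PER_MSEC  1000ULL

/* service deliverable at peak_rate within timeout_ms (mirrors the listing) */
static uint64_t calc_max_budget(uint32_t peak_rate, unsigned int timeout_ms)
{
	return ((uint64_t)peak_rate * USEC_PER_MSEC * timeout_ms)
		>> BFQ_RATE_SHIFT;
}

int main(void)
{
	/* e.g. a rate of 32768 fixed-point units and a 125 ms timeout */
	printf("max budget: %llu sectors\n",
	       (unsigned long long)calc_max_budget(32768, 125));
	return 0;
}
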
2536 static void update_thr_responsiveness_params(struct bfq_data *bfqd) in update_thr_responsiveness_params() argument
2538 if (bfqd->bfq_user_max_budget == 0) { in update_thr_responsiveness_params()
2539 bfqd->bfq_max_budget = in update_thr_responsiveness_params()
2540 bfq_calc_max_budget(bfqd); in update_thr_responsiveness_params()
2541 bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget); in update_thr_responsiveness_params()
2545 static void bfq_reset_rate_computation(struct bfq_data *bfqd, in bfq_reset_rate_computation() argument
2549 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); in bfq_reset_rate_computation()
2550 bfqd->peak_rate_samples = 1; in bfq_reset_rate_computation()
2551 bfqd->sequential_samples = 0; in bfq_reset_rate_computation()
2552 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = in bfq_reset_rate_computation()
2555 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ in bfq_reset_rate_computation()
2557 bfq_log(bfqd, in bfq_reset_rate_computation()
2559 bfqd->peak_rate_samples, bfqd->sequential_samples, in bfq_reset_rate_computation()
2560 bfqd->tot_sectors_dispatched); in bfq_reset_rate_computation()
2563 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) in bfq_update_rate_reset() argument
2575 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || in bfq_update_rate_reset()
2576 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) in bfq_update_rate_reset()
2585 bfqd->delta_from_first = in bfq_update_rate_reset()
2586 max_t(u64, bfqd->delta_from_first, in bfq_update_rate_reset()
2587 bfqd->last_completion - bfqd->first_dispatch); in bfq_update_rate_reset()
2593 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, in bfq_update_rate_reset()
2594 div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); in bfq_update_rate_reset()
2602 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && in bfq_update_rate_reset()
2603 rate <= bfqd->peak_rate) || in bfq_update_rate_reset()
2630 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; in bfq_update_rate_reset()
2637 div_u64(weight * bfqd->delta_from_first, in bfq_update_rate_reset()
2651 bfqd->peak_rate *= divisor-1; in bfq_update_rate_reset()
2652 bfqd->peak_rate /= divisor; in bfq_update_rate_reset()
2655 bfqd->peak_rate += rate; in bfq_update_rate_reset()
2664 bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); in bfq_update_rate_reset()
2666 update_thr_responsiveness_params(bfqd); in bfq_update_rate_reset()
2669 bfq_reset_rate_computation(bfqd, rq); in bfq_update_rate_reset()
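
bfq_update_rate_reset() folds the rate observed over the last interval into peak_rate with a weighted average: the weight of the new sample grows with the fraction of sequential samples (the 9 * sequential_samples / peak_rate_samples term), and the old estimate is scaled by (divisor - 1) / divisor before the new contribution is added, as in the lines above. A compact userspace sketch of that smoothing step, assuming a divisor that shrinks from 10 toward 2 as the weight grows; ewma_update() is a hypothetical helper and the validity checks and clamping of the real function are omitted.

#include <stdint.h>
#include <stdio.h>

/*
 * One smoothing step for the peak-rate estimate, modelled on the listing:
 *   peak_rate = peak_rate * (divisor-1)/divisor + rate/divisor
 * A larger weight (more sequential I/O) means a smaller divisor, so the
 * new sample counts more.
 */
static uint32_t ewma_update(uint32_t peak_rate, uint32_t rate, uint32_t weight)
{
	uint32_t divisor = 10 - weight;    /* weight assumed in [0, 8] */

	peak_rate = (uint64_t)peak_rate * (divisor - 1) / divisor;
	peak_rate += rate / divisor;
	if (peak_rate == 0)
		peak_rate = 1;             /* keep the estimate non-zero */
	return peak_rate;
}

int main(void)
{
	uint32_t peak = 1000;

	peak = ewma_update(peak, 4000, 0); /* mostly random I/O: slow drift */
	printf("low weight : %u\n", peak);
	peak = ewma_update(peak, 4000, 8); /* mostly sequential: fast drift */
	printf("high weight: %u\n", peak);
	return 0;
}
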
2704 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) in bfq_update_peak_rate() argument
2708 if (bfqd->peak_rate_samples == 0) { /* first dispatch */ in bfq_update_peak_rate()
2709 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d", in bfq_update_peak_rate()
2710 bfqd->peak_rate_samples); in bfq_update_peak_rate()
2711 bfq_reset_rate_computation(bfqd, rq); in bfq_update_peak_rate()
2727 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && in bfq_update_peak_rate()
2728 bfqd->rq_in_driver == 0) in bfq_update_peak_rate()
2732 bfqd->peak_rate_samples++; in bfq_update_peak_rate()
2734 if ((bfqd->rq_in_driver > 0 || in bfq_update_peak_rate()
2735 now_ns - bfqd->last_completion < BFQ_MIN_TT) in bfq_update_peak_rate()
2736 && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) in bfq_update_peak_rate()
2737 bfqd->sequential_samples++; in bfq_update_peak_rate()
2739 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); in bfq_update_peak_rate()
2742 if (likely(bfqd->peak_rate_samples % 32)) in bfq_update_peak_rate()
2743 bfqd->last_rq_max_size = in bfq_update_peak_rate()
2744 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); in bfq_update_peak_rate()
2746 bfqd->last_rq_max_size = blk_rq_sectors(rq); in bfq_update_peak_rate()
2748 bfqd->delta_from_first = now_ns - bfqd->first_dispatch; in bfq_update_peak_rate()
2751 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) in bfq_update_peak_rate()
2755 bfq_update_rate_reset(bfqd, rq); in bfq_update_peak_rate()
2757 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); in bfq_update_peak_rate()
2758 bfqd->last_dispatch = now_ns; in bfq_update_peak_rate()
2786 static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) in __bfq_bfqq_expire() argument
2807 bfq_del_bfqq_busy(bfqd, bfqq, true); in __bfq_bfqq_expire()
2809 bfq_requeue_bfqq(bfqd, bfqq, true); in __bfq_bfqq_expire()
2813 bfq_pos_tree_add_move(bfqd, bfqq); in __bfq_bfqq_expire()
2821 __bfq_bfqd_reset_in_service(bfqd); in __bfq_bfqq_expire()
2833 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, in __bfq_bfqq_recalc_budget() argument
2840 min_budget = bfq_min_budget(bfqd); in __bfq_bfqq_recalc_budget()
2852 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", in __bfq_bfqq_recalc_budget()
2854 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", in __bfq_bfqq_recalc_budget()
2855 budget, bfq_min_budget(bfqd)); in __bfq_bfqq_recalc_budget()
2856 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", in __bfq_bfqq_recalc_budget()
2857 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); in __bfq_bfqq_recalc_budget()
2891 budget = min(budget * 2, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
2906 budget = min(budget * 2, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
2918 budget = min(budget * 4, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
2965 budget = bfqd->bfq_max_budget; in __bfq_bfqq_recalc_budget()
2970 if (bfqd->budgets_assigned >= bfq_stats_min_budgets && in __bfq_bfqq_recalc_budget()
2971 !bfqd->bfq_user_max_budget) in __bfq_bfqq_recalc_budget()
2972 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
2989 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", in __bfq_bfqq_recalc_budget()
3025 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_bfqq_is_slow() argument
3037 delta_ktime = bfqd->last_idling_start; in bfq_bfqq_is_slow()
3040 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); in bfq_bfqq_is_slow()
3045 if (blk_queue_nonrot(bfqd->queue)) in bfq_bfqq_is_slow()
3074 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; in bfq_bfqq_is_slow()
3077 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); in bfq_bfqq_is_slow()
3175 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, in bfq_bfqq_softrt_next_start() argument
3181 bfqd->bfq_wr_max_softrt_rate, in bfq_bfqq_softrt_next_start()
3182 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); in bfq_bfqq_softrt_next_start()
3211 void bfq_bfqq_expire(struct bfq_data *bfqd, in bfq_bfqq_expire() argument
3224 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); in bfq_bfqq_expire()
3245 bfq_bfqq_charge_time(bfqd, bfqq, delta); in bfq_bfqq_expire()
3251 if (bfqd->low_latency && bfqq->wr_coeff == 1) in bfq_bfqq_expire()
3254 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && in bfq_bfqq_expire()
3269 bfq_bfqq_softrt_next_start(bfqd, bfqq); in bfq_bfqq_expire()
3279 bfq_log_bfqq(bfqd, bfqq, in bfq_bfqq_expire()
3287 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); in bfq_bfqq_expire()
3289 __bfq_bfqq_expire(bfqd, bfqq); in bfq_bfqq_expire()
3349 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_may_expire_for_budg_timeout()
3386 struct bfq_data *bfqd = bfqq->bfqd; in bfq_better_to_idle() local
3388 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, in bfq_better_to_idle()
3394 if (bfqd->strict_guarantees) in bfq_better_to_idle()
3405 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || in bfq_better_to_idle()
3435 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && in bfq_better_to_idle()
3474 bfqd->wr_busy_queues == 0; in bfq_better_to_idle()
3587 !bfq_symmetric_scenario(bfqd); in bfq_better_to_idle()
3636 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) in bfq_select_queue() argument
3642 bfqq = bfqd->in_service_queue; in bfq_select_queue()
3646 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); in bfq_select_queue()
3703 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in bfq_select_queue()
3722 bfq_bfqq_expire(bfqd, bfqq, false, reason); in bfq_select_queue()
3724 bfqq = bfq_set_in_service_queue(bfqd); in bfq_select_queue()
3726 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); in bfq_select_queue()
3731 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); in bfq_select_queue()
3733 bfq_log(bfqd, "select_queue: no queue returned"); in bfq_select_queue()
3738 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_update_wr_data() argument
3743 bfq_log_bfqq(bfqd, bfqq, in bfq_update_wr_data()
3751 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); in bfq_update_wr_data()
3762 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || in bfq_update_wr_data()
3764 bfq_wr_duration(bfqd))) in bfq_update_wr_data()
3767 switch_back_to_interactive_wr(bfqq, bfqd); in bfq_update_wr_data()
3772 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && in bfq_update_wr_data()
3794 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, in bfq_dispatch_rq_from_bfqq() argument
3804 bfq_dispatch_remove(bfqd->queue, rq); in bfq_dispatch_rq_from_bfqq()
3817 bfq_update_wr_data(bfqd, bfqq); in bfq_dispatch_rq_from_bfqq()
3824 if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq)) in bfq_dispatch_rq_from_bfqq()
3830 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); in bfq_dispatch_rq_from_bfqq()
3836 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work() local
3842 return !list_empty_careful(&bfqd->dispatch) || in bfq_has_work()
3843 bfqd->busy_queues > 0; in bfq_has_work()
3848 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request() local
3852 if (!list_empty(&bfqd->dispatch)) { in __bfq_dispatch_request()
3853 rq = list_first_entry(&bfqd->dispatch, struct request, in __bfq_dispatch_request()
3897 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); in __bfq_dispatch_request()
3899 if (bfqd->busy_queues == 0) in __bfq_dispatch_request()
3914 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) in __bfq_dispatch_request()
3917 bfqq = bfq_select_queue(bfqd); in __bfq_dispatch_request()
3921 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); in __bfq_dispatch_request()
3925 bfqd->rq_in_driver++; in __bfq_dispatch_request()
3987 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_dispatch_request() local
3992 spin_lock_irq(&bfqd->lock); in bfq_dispatch_request()
3994 in_serv_queue = bfqd->in_service_queue; in bfq_dispatch_request()
4002 spin_unlock_irq(&bfqd->lock); in bfq_dispatch_request()
4023 if (bfqq->bfqd) in bfq_put_queue()
4024 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", in bfq_put_queue()
4059 if (bfqq->bic && bfqq->bfqd->burst_size > 0) in bfq_put_queue()
4060 bfqq->bfqd->burst_size--; in bfq_put_queue()
4088 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) in bfq_exit_bfqq() argument
4090 if (bfqq == bfqd->in_service_queue) { in bfq_exit_bfqq()
4091 __bfq_bfqq_expire(bfqd, bfqq); in bfq_exit_bfqq()
4092 bfq_schedule_dispatch(bfqd); in bfq_exit_bfqq()
4095 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); in bfq_exit_bfqq()
4105 struct bfq_data *bfqd; in bfq_exit_icq_bfqq() local
4108 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ in bfq_exit_icq_bfqq()
4110 if (bfqq && bfqd) { in bfq_exit_icq_bfqq()
4113 spin_lock_irqsave(&bfqd->lock, flags); in bfq_exit_icq_bfqq()
4114 bfq_exit_bfqq(bfqd, bfqq); in bfq_exit_icq_bfqq()
4116 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_exit_icq_bfqq()
4137 struct bfq_data *bfqd = bfqq->bfqd; in bfq_set_next_ioprio_data() local
4139 if (!bfqd) in bfq_set_next_ioprio_data()
4145 dev_err(bfqq->bfqd->queue->backing_dev_info->dev, in bfq_set_next_ioprio_data()
4179 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
4185 struct bfq_data *bfqd = bic_to_bfqd(bic); in bfq_check_ioprio_change() local
4193 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) in bfq_check_ioprio_change()
4202 bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); in bfq_check_ioprio_change()
4211 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_init_bfqq() argument
4219 bfqq->bfqd = bfqd; in bfq_init_bfqq()
4246 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; in bfq_init_bfqq()
4269 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, in bfq_async_queue_prio() argument
4288 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, in bfq_get_queue() argument
4300 bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); in bfq_get_queue()
4302 bfqq = &bfqd->oom_bfqq; in bfq_get_queue()
4307 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, in bfq_get_queue()
4316 bfqd->queue->node); in bfq_get_queue()
4319 bfq_init_bfqq(bfqd, bfqq, bic, current->pid, in bfq_get_queue()
4322 bfq_log_bfqq(bfqd, bfqq, "allocated"); in bfq_get_queue()
4324 bfqq = &bfqd->oom_bfqq; in bfq_get_queue()
4325 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); in bfq_get_queue()
4341 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", in bfq_get_queue()
4348 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); in bfq_get_queue()
4353 static void bfq_update_io_thinktime(struct bfq_data *bfqd, in bfq_update_io_thinktime() argument
4359 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); in bfq_update_io_thinktime()
4368 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_update_io_seektime() argument
4374 (!blk_queue_nonrot(bfqd->queue) || in bfq_update_io_seektime()
4378 static void bfq_update_has_short_ttime(struct bfq_data *bfqd, in bfq_update_has_short_ttime() argument
4390 bfqd->bfq_slice_idle == 0) in bfq_update_has_short_ttime()
4395 bfqd->bfq_wr_min_idle_time)) in bfq_update_has_short_ttime()
4404 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) in bfq_update_has_short_ttime()
4407 bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", in bfq_update_has_short_ttime()
4420 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, in bfq_rq_enqueued() argument
4428 bfq_update_io_thinktime(bfqd, bfqq); in bfq_rq_enqueued()
4429 bfq_update_has_short_ttime(bfqd, bfqq, bic); in bfq_rq_enqueued()
4430 bfq_update_io_seektime(bfqd, bfqq, rq); in bfq_rq_enqueued()
4432 bfq_log_bfqq(bfqd, bfqq, in bfq_rq_enqueued()
4438 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { in bfq_rq_enqueued()
4468 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in bfq_rq_enqueued()
4478 bfq_bfqq_expire(bfqd, bfqq, false, in bfq_rq_enqueued()
4484 static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) in __bfq_insert_request() argument
4487 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); in __bfq_insert_request()
4509 bfq_merge_bfqqs(bfqd, RQ_BIC(rq), in __bfq_insert_request()
4526 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; in __bfq_insert_request()
4529 bfq_rq_enqueued(bfqd, bfqq, rq); in __bfq_insert_request()
4570 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_insert_request() local
4575 spin_lock_irq(&bfqd->lock); in bfq_insert_request()
4577 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
4581 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
4585 spin_lock_irq(&bfqd->lock); in bfq_insert_request()
4589 list_add(&rq->queuelist, &bfqd->dispatch); in bfq_insert_request()
4591 list_add_tail(&rq->queuelist, &bfqd->dispatch); in bfq_insert_request()
4593 idle_timer_disabled = __bfq_insert_request(bfqd, rq); in bfq_insert_request()
4615 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
4633 static void bfq_update_hw_tag(struct bfq_data *bfqd) in bfq_update_hw_tag() argument
4635 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, in bfq_update_hw_tag()
4636 bfqd->rq_in_driver); in bfq_update_hw_tag()
4638 if (bfqd->hw_tag == 1) in bfq_update_hw_tag()
4647 if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) in bfq_update_hw_tag()
4650 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) in bfq_update_hw_tag()
4653 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; in bfq_update_hw_tag()
4654 bfqd->max_rq_in_driver = 0; in bfq_update_hw_tag()
4655 bfqd->hw_tag_samples = 0; in bfq_update_hw_tag()
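
bfq_update_hw_tag() infers whether the device does internal command queueing: it tracks the largest number of requests seen in the driver and, after enough completion samples during which the scheduler itself was not the bottleneck, sets hw_tag if that maximum exceeded the threshold. A small standalone model of that sampling loop; the threshold and sample-count constants here are illustrative, not necessarily the kernel values.

#include <stdio.h>

#define HW_QUEUE_THRESHOLD 4    /* illustrative */
#define HW_QUEUE_SAMPLES   32   /* illustrative */

struct hw_tag_state {
	int hw_tag;              /* -1 unknown, 0 no queueing, 1 queueing */
	int max_rq_in_driver;
	int hw_tag_samples;
};

/* called on each request completion with the current occupancy figures */
static void update_hw_tag(struct hw_tag_state *s, int rq_in_driver, int queued)
{
	if (s->max_rq_in_driver < rq_in_driver)
		s->max_rq_in_driver = rq_in_driver;

	if (s->hw_tag == 1)
		return;                    /* already detected */

	/* only count samples where the scheduler kept the device busy enough */
	if (rq_in_driver + queued < HW_QUEUE_THRESHOLD)
		return;

	if (s->hw_tag_samples++ < HW_QUEUE_SAMPLES)
		return;

	s->hw_tag = s->max_rq_in_driver > HW_QUEUE_THRESHOLD;
	s->max_rq_in_driver = 0;
	s->hw_tag_samples = 0;
}

int main(void)
{
	struct hw_tag_state s = { .hw_tag = -1 };

	for (int i = 0; i < 40; i++)
		update_hw_tag(&s, 8 /* rq_in_driver */, 2 /* queued */);
	printf("hw_tag = %d\n", s.hw_tag);
	return 0;
}
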
4658 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) in bfq_completed_request() argument
4663 bfq_update_hw_tag(bfqd); in bfq_completed_request()
4665 bfqd->rq_in_driver--; in bfq_completed_request()
4677 bfq_weights_tree_remove(bfqd, bfqq); in bfq_completed_request()
4688 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); in bfq_completed_request()
4707 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < in bfq_completed_request()
4709 bfq_update_rate_reset(bfqd, NULL); in bfq_completed_request()
4710 bfqd->last_completion = now_ns; in bfq_completed_request()
4724 bfq_bfqq_softrt_next_start(bfqd, bfqq); in bfq_completed_request()
4730 if (bfqd->in_service_queue == bfqq) { in bfq_completed_request()
4733 bfq_arm_slice_timer(bfqd); in bfq_completed_request()
4759 bfq_bfqq_expire(bfqd, bfqq, false, in bfq_completed_request()
4764 bfq_bfqq_expire(bfqd, bfqq, false, in bfq_completed_request()
4768 if (!bfqd->rq_in_driver) in bfq_completed_request()
4769 bfq_schedule_dispatch(bfqd); in bfq_completed_request()
4788 struct bfq_data *bfqd; in bfq_finish_requeue_request() local
4810 bfqd = bfqq->bfqd; in bfq_finish_requeue_request()
4821 spin_lock_irqsave(&bfqd->lock, flags); in bfq_finish_requeue_request()
4823 bfq_completed_request(bfqq, bfqd); in bfq_finish_requeue_request()
4826 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_finish_requeue_request()
4877 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); in bfq_split_bfqq()
4894 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, in bfq_get_bfqq_handle_split() argument
4902 if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) in bfq_get_bfqq_handle_split()
4910 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); in bfq_get_bfqq_handle_split()
4914 if ((bic->was_in_burst_list && bfqd->large_burst) || in bfq_get_bfqq_handle_split()
4949 &bfqd->burst_list); in bfq_get_bfqq_handle_split()
5000 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_init_rq() local
5026 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, in bfq_init_rq()
5032 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); in bfq_init_rq()
5042 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, in bfq_init_rq()
5052 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", in bfq_init_rq()
5064 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { in bfq_init_rq()
5072 bfq_bfqq_resume_state(bfqq, bfqd, bic, in bfq_init_rq()
5078 bfq_handle_burst(bfqd, bfqq); in bfq_init_rq()
5085 struct bfq_data *bfqd = bfqq->bfqd; in bfq_idle_slice_timer_body() local
5089 spin_lock_irqsave(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
5092 if (bfqq != bfqd->in_service_queue) { in bfq_idle_slice_timer_body()
5093 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
5115 bfq_bfqq_expire(bfqd, bfqq, true, reason); in bfq_idle_slice_timer_body()
5118 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
5119 bfq_schedule_dispatch(bfqd); in bfq_idle_slice_timer_body()
5128 struct bfq_data *bfqd = container_of(timer, struct bfq_data, in bfq_idle_slice_timer() local
5130 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_idle_slice_timer()
5146 static void __bfq_put_async_bfqq(struct bfq_data *bfqd, in __bfq_put_async_bfqq() argument
5151 bfq_log(bfqd, "put_async_bfqq: %p", bfqq); in __bfq_put_async_bfqq()
5153 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); in __bfq_put_async_bfqq()
5155 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", in __bfq_put_async_bfqq()
5168 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) in bfq_put_async_queues() argument
5174 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); in bfq_put_async_queues()
5176 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); in bfq_put_async_queues()
5183 static unsigned int bfq_update_depths(struct bfq_data *bfqd, in bfq_update_depths() argument
5199 bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U); in bfq_update_depths()
5205 bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U); in bfq_update_depths()
5215 bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U); in bfq_update_depths()
5217 bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U); in bfq_update_depths()
5221 min_shallow = min(min_shallow, bfqd->word_depths[i][j]); in bfq_update_depths()
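
bfq_update_depths() derives the four shallow-depth limits used by bfq_limit_depth() earlier in this listing from the tag-bitmap word size (1 << bt->sb.shift): roughly half the depth for async and three quarters for sync requests when no queue is weight-raised, and the much smaller 3/16 and 6/16 fractions when some queue is weight-raised, so that the raised queue keeps tags available. A standalone sketch of the table construction and of the [!!wr_busy][sync] lookup; the struct and function names are hypothetical.

#include <stdbool.h>
#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

struct depths {
	/* [0][..]: no weight-raised queues, [1][..]: some queue weight-raised;
	 * [..][0]: async request,           [..][1]: sync request            */
	unsigned int word_depths[2][2];
};

static void update_depths(struct depths *d, unsigned int shift)
{
	unsigned int depth = 1U << shift;

	d->word_depths[0][0] = max_u(depth >> 1, 1U);          /* 1/2  */
	d->word_depths[0][1] = max_u((depth * 3) >> 2, 1U);    /* 3/4  */
	d->word_depths[1][0] = max_u((depth * 3) >> 4, 1U);    /* 3/16 */
	d->word_depths[1][1] = max_u((depth * 6) >> 4, 1U);    /* 6/16 */
}

static unsigned int limit_depth(const struct depths *d, bool wr_busy, bool sync)
{
	return d->word_depths[!!wr_busy][sync];  /* as in bfq_limit_depth() */
}

int main(void)
{
	struct depths d;

	update_depths(&d, 6);  /* e.g. a 64-tag bitmap word */
	printf("no wr, sync : %u\n", limit_depth(&d, false, true));
	printf("wr,    async: %u\n", limit_depth(&d, true, false));
	return 0;
}
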
5228 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_init_hctx() local
5232 min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); in bfq_init_hctx()
5239 struct bfq_data *bfqd = e->elevator_data; in bfq_exit_queue() local
5242 hrtimer_cancel(&bfqd->idle_slice_timer); in bfq_exit_queue()
5244 spin_lock_irq(&bfqd->lock); in bfq_exit_queue()
5245 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) in bfq_exit_queue()
5246 bfq_deactivate_bfqq(bfqd, bfqq, false, false); in bfq_exit_queue()
5247 spin_unlock_irq(&bfqd->lock); in bfq_exit_queue()
5249 hrtimer_cancel(&bfqd->idle_slice_timer); in bfq_exit_queue()
5253 bfqg_and_blkg_put(bfqd->root_group); in bfq_exit_queue()
5255 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); in bfq_exit_queue()
5257 spin_lock_irq(&bfqd->lock); in bfq_exit_queue()
5258 bfq_put_async_queues(bfqd, bfqd->root_group); in bfq_exit_queue()
5259 kfree(bfqd->root_group); in bfq_exit_queue()
5260 spin_unlock_irq(&bfqd->lock); in bfq_exit_queue()
5263 kfree(bfqd); in bfq_exit_queue()
5267 struct bfq_data *bfqd) in bfq_init_root_group() argument
5274 root_group->bfqd = bfqd; in bfq_init_root_group()
5284 struct bfq_data *bfqd; in bfq_init_queue() local
5291 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); in bfq_init_queue()
5292 if (!bfqd) { in bfq_init_queue()
5296 eq->elevator_data = bfqd; in bfq_init_queue()
5307 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); in bfq_init_queue()
5308 bfqd->oom_bfqq.ref++; in bfq_init_queue()
5309 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; in bfq_init_queue()
5310 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; in bfq_init_queue()
5311 bfqd->oom_bfqq.entity.new_weight = in bfq_init_queue()
5312 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); in bfq_init_queue()
5315 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); in bfq_init_queue()
5322 bfqd->oom_bfqq.entity.prio_changed = 1; in bfq_init_queue()
5324 bfqd->queue = q; in bfq_init_queue()
5326 INIT_LIST_HEAD(&bfqd->dispatch); in bfq_init_queue()
5328 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, in bfq_init_queue()
5330 bfqd->idle_slice_timer.function = bfq_idle_slice_timer; in bfq_init_queue()
5332 bfqd->queue_weights_tree = RB_ROOT; in bfq_init_queue()
5333 bfqd->group_weights_tree = RB_ROOT; in bfq_init_queue()
5335 INIT_LIST_HEAD(&bfqd->active_list); in bfq_init_queue()
5336 INIT_LIST_HEAD(&bfqd->idle_list); in bfq_init_queue()
5337 INIT_HLIST_HEAD(&bfqd->burst_list); in bfq_init_queue()
5339 bfqd->hw_tag = -1; in bfq_init_queue()
5341 bfqd->bfq_max_budget = bfq_default_max_budget; in bfq_init_queue()
5343 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; in bfq_init_queue()
5344 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; in bfq_init_queue()
5345 bfqd->bfq_back_max = bfq_back_max; in bfq_init_queue()
5346 bfqd->bfq_back_penalty = bfq_back_penalty; in bfq_init_queue()
5347 bfqd->bfq_slice_idle = bfq_slice_idle; in bfq_init_queue()
5348 bfqd->bfq_timeout = bfq_timeout; in bfq_init_queue()
5350 bfqd->bfq_requests_within_timer = 120; in bfq_init_queue()
5352 bfqd->bfq_large_burst_thresh = 8; in bfq_init_queue()
5353 bfqd->bfq_burst_interval = msecs_to_jiffies(180); in bfq_init_queue()
5355 bfqd->low_latency = true; in bfq_init_queue()
5360 bfqd->bfq_wr_coeff = 30; in bfq_init_queue()
5361 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); in bfq_init_queue()
5362 bfqd->bfq_wr_max_time = 0; in bfq_init_queue()
5363 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); in bfq_init_queue()
5364 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); in bfq_init_queue()
5365 bfqd->bfq_wr_max_softrt_rate = 7000; /* in bfq_init_queue()
5371 bfqd->wr_busy_queues = 0; in bfq_init_queue()
5377 bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * in bfq_init_queue()
5378 ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; in bfq_init_queue()
5379 bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; in bfq_init_queue()
5381 spin_lock_init(&bfqd->lock); in bfq_init_queue()
5398 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); in bfq_init_queue()
5399 if (!bfqd->root_group) in bfq_init_queue()
5401 bfq_init_root_group(bfqd->root_group, bfqd); in bfq_init_queue()
5402 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); in bfq_init_queue()
5408 kfree(bfqd); in bfq_init_queue()
5445 struct bfq_data *bfqd = e->elevator_data; \
5453 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
5454 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
5455 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
5456 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
5457 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
5458 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
5459 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
5460 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
5461 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
5467 struct bfq_data *bfqd = e->elevator_data; \
5472 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
5479 struct bfq_data *bfqd = e->elevator_data; \
5498 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
5500 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
5502 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
5503 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
5505 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
5511 struct bfq_data *bfqd = e->elevator_data; \
5525 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
5532 struct bfq_data *bfqd = e->elevator_data; in bfq_max_budget_store() local
5541 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); in bfq_max_budget_store()
5545 bfqd->bfq_max_budget = __data; in bfq_max_budget_store()
5548 bfqd->bfq_user_max_budget = __data; in bfq_max_budget_store()
5560 struct bfq_data *bfqd = e->elevator_data; in bfq_timeout_sync_store() local
5573 bfqd->bfq_timeout = msecs_to_jiffies(__data); in bfq_timeout_sync_store()
5574 if (bfqd->bfq_user_max_budget == 0) in bfq_timeout_sync_store()
5575 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); in bfq_timeout_sync_store()
5583 struct bfq_data *bfqd = e->elevator_data; in bfq_strict_guarantees_store() local
5593 if (!bfqd->strict_guarantees && __data == 1 in bfq_strict_guarantees_store()
5594 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) in bfq_strict_guarantees_store()
5595 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; in bfq_strict_guarantees_store()
5597 bfqd->strict_guarantees = __data; in bfq_strict_guarantees_store()
5605 struct bfq_data *bfqd = e->elevator_data; in bfq_low_latency_store() local
5615 if (__data == 0 && bfqd->low_latency != 0) in bfq_low_latency_store()
5616 bfq_end_wr(bfqd); in bfq_low_latency_store()
5617 bfqd->low_latency = __data; in bfq_low_latency_store()