Lines Matching refs: q

79 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)  in blk_queue_flag_set()  argument
83 spin_lock_irqsave(q->queue_lock, flags); in blk_queue_flag_set()
84 queue_flag_set(flag, q); in blk_queue_flag_set()
85 spin_unlock_irqrestore(q->queue_lock, flags); in blk_queue_flag_set()
94 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) in blk_queue_flag_clear() argument
98 spin_lock_irqsave(q->queue_lock, flags); in blk_queue_flag_clear()
99 queue_flag_clear(flag, q); in blk_queue_flag_clear()
100 spin_unlock_irqrestore(q->queue_lock, flags); in blk_queue_flag_clear()
112 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q) in blk_queue_flag_test_and_set() argument
117 spin_lock_irqsave(q->queue_lock, flags); in blk_queue_flag_test_and_set()
118 res = queue_flag_test_and_set(flag, q); in blk_queue_flag_test_and_set()
119 spin_unlock_irqrestore(q->queue_lock, flags); in blk_queue_flag_test_and_set()
133 bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q) in blk_queue_flag_test_and_clear() argument
138 spin_lock_irqsave(q->queue_lock, flags); in blk_queue_flag_test_and_clear()
139 res = queue_flag_test_and_clear(flag, q); in blk_queue_flag_test_and_clear()
140 spin_unlock_irqrestore(q->queue_lock, flags); in blk_queue_flag_test_and_clear()
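
The four blk_queue_flag_* helpers above are the locked accessors for QUEUE_FLAG_* bits: they take q->queue_lock around the unlocked queue_flag_* primitives so callers never manage the lock themselves. A minimal sketch of how they pair up, using QUEUE_FLAG_NOMERGES purely as an illustration (the function name is hypothetical):

#include <linux/blkdev.h>

/* Sketch: toggle a QUEUE_FLAG_* bit; set/clear/test_and_* take and
 * release q->queue_lock internally, so no spin_lock_irqsave() here. */
static void example_toggle_nomerges(struct request_queue *q)
{
        blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);

        /* the test_and_ variants additionally return the previous state */
        if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q))
                blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);   /* we set it: undo */
}
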
155 if (rl == &rl->q->root_rl) in blk_clear_congested()
156 clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync); in blk_clear_congested()
166 if (rl == &rl->q->root_rl) in blk_set_congested()
167 set_wb_congested(rl->q->backing_dev_info->wb.congested, sync); in blk_set_congested()
171 void blk_queue_congestion_threshold(struct request_queue *q) in blk_queue_congestion_threshold() argument
175 nr = q->nr_requests - (q->nr_requests / 8) + 1; in blk_queue_congestion_threshold()
176 if (nr > q->nr_requests) in blk_queue_congestion_threshold()
177 nr = q->nr_requests; in blk_queue_congestion_threshold()
178 q->nr_congestion_on = nr; in blk_queue_congestion_threshold()
180 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; in blk_queue_congestion_threshold()
183 q->nr_congestion_off = nr; in blk_queue_congestion_threshold()
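
blk_queue_congestion_threshold() derives both watermarks from q->nr_requests: congestion is signalled when a request list climbs to roughly 7/8 of nr_requests and is cleared again somewhat lower, so the two thresholds provide hysteresis rather than flapping. A worked example, assuming the default nr_requests of 128 (BLKDEV_MAX_RQ): nr_congestion_on = 128 - 128/8 + 1 = 113 and nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103, with the visible clamp keeping nr_congestion_on from exceeding nr_requests.
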
186 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
193 rq->q = q; in blk_rq_init()
296 struct request_queue *q; in blk_delay_work() local
298 q = container_of(work, struct request_queue, delay_work.work); in blk_delay_work()
299 spin_lock_irq(q->queue_lock); in blk_delay_work()
300 __blk_run_queue(q); in blk_delay_work()
301 spin_unlock_irq(q->queue_lock); in blk_delay_work()
314 void blk_delay_queue(struct request_queue *q, unsigned long msecs) in blk_delay_queue() argument
316 lockdep_assert_held(q->queue_lock); in blk_delay_queue()
317 WARN_ON_ONCE(q->mq_ops); in blk_delay_queue()
319 if (likely(!blk_queue_dead(q))) in blk_delay_queue()
320 queue_delayed_work(kblockd_workqueue, &q->delay_work, in blk_delay_queue()
334 void blk_start_queue_async(struct request_queue *q) in blk_start_queue_async() argument
336 lockdep_assert_held(q->queue_lock); in blk_start_queue_async()
337 WARN_ON_ONCE(q->mq_ops); in blk_start_queue_async()
339 queue_flag_clear(QUEUE_FLAG_STOPPED, q); in blk_start_queue_async()
340 blk_run_queue_async(q); in blk_start_queue_async()
353 void blk_start_queue(struct request_queue *q) in blk_start_queue() argument
355 lockdep_assert_held(q->queue_lock); in blk_start_queue()
356 WARN_ON_ONCE(q->mq_ops); in blk_start_queue()
358 queue_flag_clear(QUEUE_FLAG_STOPPED, q); in blk_start_queue()
359 __blk_run_queue(q); in blk_start_queue()
377 void blk_stop_queue(struct request_queue *q) in blk_stop_queue() argument
379 lockdep_assert_held(q->queue_lock); in blk_stop_queue()
380 WARN_ON_ONCE(q->mq_ops); in blk_stop_queue()
382 cancel_delayed_work(&q->delay_work); in blk_stop_queue()
383 queue_flag_set(QUEUE_FLAG_STOPPED, q); in blk_stop_queue()
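
blk_stop_queue() and blk_start_queue() implement the flow-control handshake of the legacy (single-queue) path: the driver stops the queue when the hardware cannot take more work and restarts it from its completion path, both under q->queue_lock. A minimal sketch under those assumptions; struct my_dev, my_request_fn() and my_completion() are hypothetical driver code:

#include <linux/blkdev.h>

struct my_dev {                         /* hypothetical driver state */
        struct request_queue *queue;
        bool busy;                      /* hardware cannot accept more work */
};

/* ->request_fn is invoked with q->queue_lock held */
static void my_request_fn(struct request_queue *q)
{
        struct my_dev *dev = q->queuedata;
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (dev->busy) {
                        blk_requeue_request(q, rq);
                        blk_stop_queue(q);      /* sets QUEUE_FLAG_STOPPED */
                        return;
                }
                /* ... hand rq to the hardware ... */
        }
}

/* called from the driver's completion handling */
static void my_completion(struct my_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(dev->queue->queue_lock, flags);
        dev->busy = false;
        blk_start_queue(dev->queue);    /* clears STOPPED and reruns the queue */
        spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}
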
405 void blk_sync_queue(struct request_queue *q) in blk_sync_queue() argument
407 del_timer_sync(&q->timeout); in blk_sync_queue()
408 cancel_work_sync(&q->timeout_work); in blk_sync_queue()
410 if (q->mq_ops) { in blk_sync_queue()
414 cancel_delayed_work_sync(&q->requeue_work); in blk_sync_queue()
415 queue_for_each_hw_ctx(q, hctx, i) in blk_sync_queue()
418 cancel_delayed_work_sync(&q->delay_work); in blk_sync_queue()
430 int blk_set_preempt_only(struct request_queue *q) in blk_set_preempt_only() argument
432 return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q); in blk_set_preempt_only()
436 void blk_clear_preempt_only(struct request_queue *q) in blk_clear_preempt_only() argument
438 blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q); in blk_clear_preempt_only()
439 wake_up_all(&q->mq_freeze_wq); in blk_clear_preempt_only()
454 inline void __blk_run_queue_uncond(struct request_queue *q) in __blk_run_queue_uncond() argument
456 lockdep_assert_held(q->queue_lock); in __blk_run_queue_uncond()
457 WARN_ON_ONCE(q->mq_ops); in __blk_run_queue_uncond()
459 if (unlikely(blk_queue_dead(q))) in __blk_run_queue_uncond()
469 q->request_fn_active++; in __blk_run_queue_uncond()
470 q->request_fn(q); in __blk_run_queue_uncond()
471 q->request_fn_active--; in __blk_run_queue_uncond()
482 void __blk_run_queue(struct request_queue *q) in __blk_run_queue() argument
484 lockdep_assert_held(q->queue_lock); in __blk_run_queue()
485 WARN_ON_ONCE(q->mq_ops); in __blk_run_queue()
487 if (unlikely(blk_queue_stopped(q))) in __blk_run_queue()
490 __blk_run_queue_uncond(q); in __blk_run_queue()
507 void blk_run_queue_async(struct request_queue *q) in blk_run_queue_async() argument
509 lockdep_assert_held(q->queue_lock); in blk_run_queue_async()
510 WARN_ON_ONCE(q->mq_ops); in blk_run_queue_async()
512 if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q))) in blk_run_queue_async()
513 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0); in blk_run_queue_async()
525 void blk_run_queue(struct request_queue *q) in blk_run_queue() argument
529 WARN_ON_ONCE(q->mq_ops); in blk_run_queue()
531 spin_lock_irqsave(q->queue_lock, flags); in blk_run_queue()
532 __blk_run_queue(q); in blk_run_queue()
533 spin_unlock_irqrestore(q->queue_lock, flags); in blk_run_queue()
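
The run-queue variants differ only in locking and recursion: __blk_run_queue() and blk_run_queue_async() must be called with q->queue_lock held (the async form defers to kblockd instead of calling ->request_fn directly), while blk_run_queue() takes and releases the lock itself. A minimal sketch of picking between them (the function names are hypothetical):

#include <linux/blkdev.h>

/* process context, lock not held: blk_run_queue() locks internally */
static void example_kick_queue(struct request_queue *q)
{
        blk_run_queue(q);
}

/* already under q->queue_lock (e.g. deep in driver code): defer the
 * actual ->request_fn invocation to kblockd to avoid recursion */
static void example_kick_queue_locked(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);
        blk_run_queue_async(q);
}
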
537 void blk_put_queue(struct request_queue *q) in blk_put_queue() argument
539 kobject_put(&q->kobj); in blk_put_queue()
552 static void __blk_drain_queue(struct request_queue *q, bool drain_all) in __blk_drain_queue() argument
553 __releases(q->queue_lock) in __blk_drain_queue()
554 __acquires(q->queue_lock) in __blk_drain_queue()
558 lockdep_assert_held(q->queue_lock); in __blk_drain_queue()
559 WARN_ON_ONCE(q->mq_ops); in __blk_drain_queue()
568 if (q->elevator) in __blk_drain_queue()
569 elv_drain_elevator(q); in __blk_drain_queue()
571 blkcg_drain_queue(q); in __blk_drain_queue()
580 if (!list_empty(&q->queue_head) && q->request_fn) in __blk_drain_queue()
581 __blk_run_queue(q); in __blk_drain_queue()
583 drain |= q->nr_rqs_elvpriv; in __blk_drain_queue()
584 drain |= q->request_fn_active; in __blk_drain_queue()
592 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); in __blk_drain_queue()
593 drain |= !list_empty(&q->queue_head); in __blk_drain_queue()
595 drain |= q->nr_rqs[i]; in __blk_drain_queue()
596 drain |= q->in_flight[i]; in __blk_drain_queue()
605 spin_unlock_irq(q->queue_lock); in __blk_drain_queue()
609 spin_lock_irq(q->queue_lock); in __blk_drain_queue()
617 if (q->request_fn) { in __blk_drain_queue()
620 blk_queue_for_each_rl(rl, q) in __blk_drain_queue()
626 void blk_drain_queue(struct request_queue *q) in blk_drain_queue() argument
628 spin_lock_irq(q->queue_lock); in blk_drain_queue()
629 __blk_drain_queue(q, true); in blk_drain_queue()
630 spin_unlock_irq(q->queue_lock); in blk_drain_queue()
643 void blk_queue_bypass_start(struct request_queue *q) in blk_queue_bypass_start() argument
645 WARN_ON_ONCE(q->mq_ops); in blk_queue_bypass_start()
647 spin_lock_irq(q->queue_lock); in blk_queue_bypass_start()
648 q->bypass_depth++; in blk_queue_bypass_start()
649 queue_flag_set(QUEUE_FLAG_BYPASS, q); in blk_queue_bypass_start()
650 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_start()
657 if (blk_queue_init_done(q)) { in blk_queue_bypass_start()
658 spin_lock_irq(q->queue_lock); in blk_queue_bypass_start()
659 __blk_drain_queue(q, false); in blk_queue_bypass_start()
660 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_start()
677 void blk_queue_bypass_end(struct request_queue *q) in blk_queue_bypass_end() argument
679 spin_lock_irq(q->queue_lock); in blk_queue_bypass_end()
680 if (!--q->bypass_depth) in blk_queue_bypass_end()
681 queue_flag_clear(QUEUE_FLAG_BYPASS, q); in blk_queue_bypass_end()
682 WARN_ON_ONCE(q->bypass_depth < 0); in blk_queue_bypass_end()
683 spin_unlock_irq(q->queue_lock); in blk_queue_bypass_end()
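
blk_queue_bypass_start() and blk_queue_bypass_end() bracket block-layer-internal reconfiguration such as elevator switches and blkcg policy changes: bypass_depth makes the mode reference-counted, and the first start on an already-initialized queue also drains it. A minimal sketch of the pairing, assuming the prototypes are visible to the caller (these are block-layer internals rather than a driver API):

/* Sketch: while bypassed, new requests skip the elevator/blkcg paths,
 * so per-queue state can be swapped safely. */
static void example_reconfigure(struct request_queue *q)
{
        blk_queue_bypass_start(q);      /* may drain the queue */
        /* ... tear down and replace elevator/blkcg state ... */
        blk_queue_bypass_end(q);        /* leaves bypass once depth hits 0 */
}
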
687 void blk_set_queue_dying(struct request_queue *q) in blk_set_queue_dying() argument
689 blk_queue_flag_set(QUEUE_FLAG_DYING, q); in blk_set_queue_dying()
696 blk_freeze_queue_start(q); in blk_set_queue_dying()
698 if (q->mq_ops) in blk_set_queue_dying()
699 blk_mq_wake_waiters(q); in blk_set_queue_dying()
703 spin_lock_irq(q->queue_lock); in blk_set_queue_dying()
704 blk_queue_for_each_rl(rl, q) { in blk_set_queue_dying()
710 spin_unlock_irq(q->queue_lock); in blk_set_queue_dying()
714 wake_up_all(&q->mq_freeze_wq); in blk_set_queue_dying()
719 void blk_exit_queue(struct request_queue *q) in blk_exit_queue() argument
726 if (q->elevator) { in blk_exit_queue()
727 ioc_clear_queue(q); in blk_exit_queue()
728 elevator_exit(q, q->elevator); in blk_exit_queue()
729 q->elevator = NULL; in blk_exit_queue()
737 blkcg_exit_queue(q); in blk_exit_queue()
744 bdi_put(q->backing_dev_info); in blk_exit_queue()
754 void blk_cleanup_queue(struct request_queue *q) in blk_cleanup_queue() argument
756 spinlock_t *lock = q->queue_lock; in blk_cleanup_queue()
759 mutex_lock(&q->sysfs_lock); in blk_cleanup_queue()
760 blk_set_queue_dying(q); in blk_cleanup_queue()
772 q->bypass_depth++; in blk_cleanup_queue()
773 queue_flag_set(QUEUE_FLAG_BYPASS, q); in blk_cleanup_queue()
775 queue_flag_set(QUEUE_FLAG_NOMERGES, q); in blk_cleanup_queue()
776 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in blk_cleanup_queue()
777 queue_flag_set(QUEUE_FLAG_DYING, q); in blk_cleanup_queue()
779 mutex_unlock(&q->sysfs_lock); in blk_cleanup_queue()
785 blk_freeze_queue(q); in blk_cleanup_queue()
787 queue_flag_set(QUEUE_FLAG_DEAD, q); in blk_cleanup_queue()
800 if (q->mq_ops && blk_queue_init_done(q)) in blk_cleanup_queue()
801 blk_mq_quiesce_queue(q); in blk_cleanup_queue()
807 del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer); in blk_cleanup_queue()
808 blk_sync_queue(q); in blk_cleanup_queue()
814 WARN_ON_ONCE(q->kobj.state_in_sysfs); in blk_cleanup_queue()
816 blk_exit_queue(q); in blk_cleanup_queue()
818 if (q->mq_ops) in blk_cleanup_queue()
819 blk_mq_free_queue(q); in blk_cleanup_queue()
820 percpu_ref_exit(&q->q_usage_counter); in blk_cleanup_queue()
823 if (q->queue_lock != &q->__queue_lock) in blk_cleanup_queue()
824 q->queue_lock = &q->__queue_lock; in blk_cleanup_queue()
828 blk_put_queue(q); in blk_cleanup_queue()
835 struct request_queue *q = data; in alloc_request_simple() local
837 return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node); in alloc_request_simple()
847 struct request_queue *q = data; in alloc_request_size() local
850 rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask, in alloc_request_size()
851 q->node); in alloc_request_size()
852 if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) { in alloc_request_size()
861 struct request_queue *q = data; in free_request_size() local
863 if (q->exit_rq_fn) in free_request_size()
864 q->exit_rq_fn(q, element); in free_request_size()
868 int blk_init_rl(struct request_list *rl, struct request_queue *q, in blk_init_rl() argument
871 if (unlikely(rl->rq_pool) || q->mq_ops) in blk_init_rl()
874 rl->q = q; in blk_init_rl()
880 if (q->cmd_size) { in blk_init_rl()
883 q, gfp_mask, q->node); in blk_init_rl()
887 q, gfp_mask, q->node); in blk_init_rl()
892 if (rl != &q->root_rl) in blk_init_rl()
893 WARN_ON_ONCE(!blk_get_queue(q)); in blk_init_rl()
898 void blk_exit_rl(struct request_queue *q, struct request_list *rl) in blk_exit_rl() argument
902 if (rl != &q->root_rl) in blk_exit_rl()
903 blk_put_queue(q); in blk_exit_rl()
918 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) in blk_queue_enter() argument
926 if (percpu_ref_tryget_live(&q->q_usage_counter)) { in blk_queue_enter()
932 if (preempt || !blk_queue_preempt_only(q)) { in blk_queue_enter()
935 percpu_ref_put(&q->q_usage_counter); in blk_queue_enter()
955 wait_event(q->mq_freeze_wq, in blk_queue_enter()
956 (atomic_read(&q->mq_freeze_depth) == 0 && in blk_queue_enter()
957 (preempt || !blk_queue_preempt_only(q))) || in blk_queue_enter()
958 blk_queue_dying(q)); in blk_queue_enter()
959 if (blk_queue_dying(q)) in blk_queue_enter()
964 void blk_queue_exit(struct request_queue *q) in blk_queue_exit() argument
966 percpu_ref_put(&q->q_usage_counter); in blk_queue_exit()
971 struct request_queue *q = in blk_queue_usage_counter_release() local
974 wake_up_all(&q->mq_freeze_wq); in blk_queue_usage_counter_release()
979 struct request_queue *q = from_timer(q, t, timeout); in blk_rq_timed_out_timer() local
981 kblockd_schedule_work(&q->timeout_work); in blk_rq_timed_out_timer()
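
blk_queue_enter() and blk_queue_exit() bracket every submission with a reference on q->q_usage_counter: enter sleeps on mq_freeze_wq while the queue is frozen or preempt-only, fails once the queue is dying, and blk_queue_usage_counter_release() wakes those sleepers when the counter drains. A minimal sketch of the pairing (the function name is hypothetical):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Sketch: every successful blk_queue_enter() must be matched by
 * blk_queue_exit(); pass BLK_MQ_REQ_NOWAIT instead of 0 to fail rather
 * than sleep while the queue is frozen. */
static int example_guarded_submit(struct request_queue *q)
{
        int ret;

        ret = blk_queue_enter(q, 0);
        if (ret)
                return ret;             /* queue is dying */

        /* ... issue work against q ... */

        blk_queue_exit(q);
        return 0;
}
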
1000 struct request_queue *q; in blk_alloc_queue_node() local
1003 q = kmem_cache_alloc_node(blk_requestq_cachep, in blk_alloc_queue_node()
1005 if (!q) in blk_alloc_queue_node()
1008 INIT_LIST_HEAD(&q->queue_head); in blk_alloc_queue_node()
1009 q->last_merge = NULL; in blk_alloc_queue_node()
1010 q->end_sector = 0; in blk_alloc_queue_node()
1011 q->boundary_rq = NULL; in blk_alloc_queue_node()
1013 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); in blk_alloc_queue_node()
1014 if (q->id < 0) in blk_alloc_queue_node()
1017 ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in blk_alloc_queue_node()
1021 q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id); in blk_alloc_queue_node()
1022 if (!q->backing_dev_info) in blk_alloc_queue_node()
1025 q->stats = blk_alloc_queue_stats(); in blk_alloc_queue_node()
1026 if (!q->stats) in blk_alloc_queue_node()
1029 q->backing_dev_info->ra_pages = in blk_alloc_queue_node()
1031 q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; in blk_alloc_queue_node()
1032 q->backing_dev_info->name = "block"; in blk_alloc_queue_node()
1033 q->node = node_id; in blk_alloc_queue_node()
1035 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, in blk_alloc_queue_node()
1037 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); in blk_alloc_queue_node()
1038 INIT_WORK(&q->timeout_work, NULL); in blk_alloc_queue_node()
1039 INIT_LIST_HEAD(&q->timeout_list); in blk_alloc_queue_node()
1040 INIT_LIST_HEAD(&q->icq_list); in blk_alloc_queue_node()
1042 INIT_LIST_HEAD(&q->blkg_list); in blk_alloc_queue_node()
1044 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); in blk_alloc_queue_node()
1046 kobject_init(&q->kobj, &blk_queue_ktype); in blk_alloc_queue_node()
1049 mutex_init(&q->blk_trace_mutex); in blk_alloc_queue_node()
1051 mutex_init(&q->sysfs_lock); in blk_alloc_queue_node()
1052 spin_lock_init(&q->__queue_lock); in blk_alloc_queue_node()
1054 if (!q->mq_ops) in blk_alloc_queue_node()
1055 q->queue_lock = lock ? : &q->__queue_lock; in blk_alloc_queue_node()
1063 q->bypass_depth = 1; in blk_alloc_queue_node()
1064 queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); in blk_alloc_queue_node()
1066 init_waitqueue_head(&q->mq_freeze_wq); in blk_alloc_queue_node()
1072 if (percpu_ref_init(&q->q_usage_counter, in blk_alloc_queue_node()
1077 if (blkcg_init_queue(q)) in blk_alloc_queue_node()
1080 return q; in blk_alloc_queue_node()
1083 percpu_ref_exit(&q->q_usage_counter); in blk_alloc_queue_node()
1085 blk_free_queue_stats(q->stats); in blk_alloc_queue_node()
1087 bdi_put(q->backing_dev_info); in blk_alloc_queue_node()
1089 bioset_exit(&q->bio_split); in blk_alloc_queue_node()
1091 ida_simple_remove(&blk_queue_ida, q->id); in blk_alloc_queue_node()
1093 kmem_cache_free(blk_requestq_cachep, q); in blk_alloc_queue_node()
1140 struct request_queue *q; in blk_init_queue_node() local
1142 q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock); in blk_init_queue_node()
1143 if (!q) in blk_init_queue_node()
1146 q->request_fn = rfn; in blk_init_queue_node()
1147 if (blk_init_allocated_queue(q) < 0) { in blk_init_queue_node()
1148 blk_cleanup_queue(q); in blk_init_queue_node()
1152 return q; in blk_init_queue_node()
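
blk_init_queue_node() is the legacy constructor: it allocates the queue via blk_alloc_queue_node(), binds the driver's request_fn, and finishes setup through blk_init_allocated_queue(); blk_cleanup_queue() from the earlier block is its counterpart at teardown. A minimal lifecycle sketch under those assumptions; my_request_fn and my_lock are hypothetical driver pieces:

#include <linux/blkdev.h>
#include <linux/numa.h>

static DEFINE_SPINLOCK(my_lock);                        /* hypothetical queue lock */
static void my_request_fn(struct request_queue *q);     /* hypothetical, as sketched above */

static struct request_queue *example_create_queue(void)
{
        struct request_queue *q;

        q = blk_init_queue_node(my_request_fn, &my_lock, NUMA_NO_NODE);
        if (!q)
                return NULL;

        blk_queue_max_hw_sectors(q, 256);       /* apply per-device limits here */
        return q;
}

static void example_destroy_queue(struct request_queue *q)
{
        /* marks the queue dying, drains it and drops the final reference */
        blk_cleanup_queue(q);
}
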
1156 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
1159 int blk_init_allocated_queue(struct request_queue *q) in blk_init_allocated_queue() argument
1161 WARN_ON_ONCE(q->mq_ops); in blk_init_allocated_queue()
1163 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size); in blk_init_allocated_queue()
1164 if (!q->fq) in blk_init_allocated_queue()
1167 if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL)) in blk_init_allocated_queue()
1170 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) in blk_init_allocated_queue()
1173 INIT_WORK(&q->timeout_work, blk_timeout_work); in blk_init_allocated_queue()
1174 q->queue_flags |= QUEUE_FLAG_DEFAULT; in blk_init_allocated_queue()
1179 blk_queue_make_request(q, blk_queue_bio); in blk_init_allocated_queue()
1181 q->sg_reserved_size = INT_MAX; in blk_init_allocated_queue()
1183 if (elevator_init(q)) in blk_init_allocated_queue()
1188 if (q->exit_rq_fn) in blk_init_allocated_queue()
1189 q->exit_rq_fn(q, q->fq->flush_rq); in blk_init_allocated_queue()
1191 blk_free_flush_queue(q->fq); in blk_init_allocated_queue()
1192 q->fq = NULL; in blk_init_allocated_queue()
1197 bool blk_get_queue(struct request_queue *q) in blk_get_queue() argument
1199 if (likely(!blk_queue_dying(q))) { in blk_get_queue()
1200 __blk_get_queue(q); in blk_get_queue()
1211 elv_put_request(rl->q, rq); in blk_free_request()
1223 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) in ioc_batching() argument
1233 return ioc->nr_batch_requests == q->nr_batching || in ioc_batching()
1244 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) in ioc_set_batching() argument
1246 if (!ioc || ioc_batching(q, ioc)) in ioc_set_batching()
1249 ioc->nr_batch_requests = q->nr_batching; in ioc_set_batching()
1255 struct request_queue *q = rl->q; in __freed_request() local
1257 if (rl->count[sync] < queue_congestion_off_threshold(q)) in __freed_request()
1260 if (rl->count[sync] + 1 <= q->nr_requests) { in __freed_request()
1275 struct request_queue *q = rl->q; in freed_request() local
1277 q->nr_rqs[sync]--; in freed_request()
1280 q->nr_rqs_elvpriv--; in freed_request()
1288 int blk_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_update_nr_requests() argument
1293 WARN_ON_ONCE(q->mq_ops); in blk_update_nr_requests()
1295 spin_lock_irq(q->queue_lock); in blk_update_nr_requests()
1296 q->nr_requests = nr; in blk_update_nr_requests()
1297 blk_queue_congestion_threshold(q); in blk_update_nr_requests()
1298 on_thresh = queue_congestion_on_threshold(q); in blk_update_nr_requests()
1299 off_thresh = queue_congestion_off_threshold(q); in blk_update_nr_requests()
1301 blk_queue_for_each_rl(rl, q) { in blk_update_nr_requests()
1312 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in blk_update_nr_requests()
1319 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { in blk_update_nr_requests()
1327 spin_unlock_irq(q->queue_lock); in blk_update_nr_requests()
1349 struct request_queue *q = rl->q; in __get_request() local
1351 struct elevator_type *et = q->elevator->type; in __get_request()
1358 lockdep_assert_held(q->queue_lock); in __get_request()
1360 if (unlikely(blk_queue_dying(q))) in __get_request()
1363 may_queue = elv_may_queue(q, op); in __get_request()
1367 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { in __get_request()
1368 if (rl->count[is_sync]+1 >= q->nr_requests) { in __get_request()
1376 ioc_set_batching(q, ioc); in __get_request()
1380 && !ioc_batching(q, ioc)) { in __get_request()
1398 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) in __get_request()
1401 q->nr_rqs[is_sync]++; in __get_request()
1418 if (!op_is_flush(op) && !blk_queue_bypass(q)) { in __get_request()
1420 q->nr_rqs_elvpriv++; in __get_request()
1422 icq = ioc_lookup_icq(ioc, q); in __get_request()
1425 if (blk_queue_io_stat(q)) in __get_request()
1427 spin_unlock_irq(q->queue_lock); in __get_request()
1434 blk_rq_init(q, rq); in __get_request()
1445 icq = ioc_create_icq(ioc, q, gfp_mask); in __get_request()
1451 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) in __get_request()
1465 if (ioc_batching(q, ioc)) in __get_request()
1468 trace_block_getrq(q, bio, op); in __get_request()
1479 __func__, dev_name(q->backing_dev_info->dev)); in __get_request()
1484 spin_lock_irq(q->queue_lock); in __get_request()
1485 q->nr_rqs_elvpriv--; in __get_request()
1486 spin_unlock_irq(q->queue_lock); in __get_request()
1497 spin_lock_irq(q->queue_lock); in __get_request()
1528 static struct request *get_request(struct request_queue *q, unsigned int op, in get_request() argument
1536 lockdep_assert_held(q->queue_lock); in get_request()
1537 WARN_ON_ONCE(q->mq_ops); in get_request()
1539 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ in get_request()
1550 if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) { in get_request()
1559 trace_block_sleeprq(q, bio, op); in get_request()
1561 spin_unlock_irq(q->queue_lock); in get_request()
1569 ioc_set_batching(q, current->io_context); in get_request()
1571 spin_lock_irq(q->queue_lock); in get_request()
1578 static struct request *blk_old_get_request(struct request_queue *q, in blk_old_get_request() argument
1585 WARN_ON_ONCE(q->mq_ops); in blk_old_get_request()
1588 create_io_context(gfp_mask, q->node); in blk_old_get_request()
1590 ret = blk_queue_enter(q, flags); in blk_old_get_request()
1593 spin_lock_irq(q->queue_lock); in blk_old_get_request()
1594 rq = get_request(q, op, NULL, flags, gfp_mask); in blk_old_get_request()
1596 spin_unlock_irq(q->queue_lock); in blk_old_get_request()
1597 blk_queue_exit(q); in blk_old_get_request()
1614 struct request *blk_get_request(struct request_queue *q, unsigned int op, in blk_get_request() argument
1622 if (q->mq_ops) { in blk_get_request()
1623 req = blk_mq_alloc_request(q, op, flags); in blk_get_request()
1624 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn) in blk_get_request()
1625 q->mq_ops->initialize_rq_fn(req); in blk_get_request()
1627 req = blk_old_get_request(q, op, flags); in blk_get_request()
1628 if (!IS_ERR(req) && q->initialize_rq_fn) in blk_get_request()
1629 q->initialize_rq_fn(req); in blk_get_request()
1646 void blk_requeue_request(struct request_queue *q, struct request *rq) in blk_requeue_request() argument
1648 lockdep_assert_held(q->queue_lock); in blk_requeue_request()
1649 WARN_ON_ONCE(q->mq_ops); in blk_requeue_request()
1653 trace_block_rq_requeue(q, rq); in blk_requeue_request()
1654 rq_qos_requeue(q, rq); in blk_requeue_request()
1657 blk_queue_end_tag(q, rq); in blk_requeue_request()
1661 elv_requeue_request(q, rq); in blk_requeue_request()
1665 static void add_acct_request(struct request_queue *q, struct request *rq, in add_acct_request() argument
1669 __elv_add_request(q, rq, where); in add_acct_request()
1672 static void part_round_stats_single(struct request_queue *q, int cpu, in part_round_stats_single() argument
1701 void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part) in part_round_stats() argument
1720 part_in_flight(q, part, inflight); in part_round_stats()
1723 part_round_stats_single(q, cpu, part2, now, inflight[1]); in part_round_stats()
1725 part_round_stats_single(q, cpu, part, now, inflight[0]); in part_round_stats()
1732 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending) in blk_pm_put_request()
1733 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_put_request()
1739 void __blk_put_request(struct request_queue *q, struct request *req) in __blk_put_request() argument
1743 if (unlikely(!q)) in __blk_put_request()
1746 if (q->mq_ops) { in __blk_put_request()
1751 lockdep_assert_held(q->queue_lock); in __blk_put_request()
1756 elv_completed_request(q, req); in __blk_put_request()
1761 rq_qos_done(q, req); in __blk_put_request()
1777 blk_queue_exit(q); in __blk_put_request()
1784 struct request_queue *q = req->q; in blk_put_request() local
1786 if (q->mq_ops) in blk_put_request()
1791 spin_lock_irqsave(q->queue_lock, flags); in blk_put_request()
1792 __blk_put_request(q, req); in blk_put_request()
1793 spin_unlock_irqrestore(q->queue_lock, flags); in blk_put_request()
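
blk_get_request() is the allocation front end that works for both paths (blk_mq_alloc_request() on mq queues, blk_old_get_request() and the request_list machinery on legacy ones), and blk_put_request()/__blk_put_request() return the request and drop the queue usage reference taken at allocation. A minimal passthrough-style sketch (the function name is hypothetical):

#include <linux/blkdev.h>
#include <linux/err.h>

static int example_alloc_and_free(struct request_queue *q)
{
        struct request *rq;

        rq = blk_get_request(q, REQ_OP_DRV_IN, 0);      /* may sleep */
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* ... fill in driver-private fields, issue and wait ... */

        blk_put_request(rq);
        return 0;
}
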
1798 bool bio_attempt_back_merge(struct request_queue *q, struct request *req, in bio_attempt_back_merge() argument
1803 if (!ll_back_merge_fn(q, req, bio)) in bio_attempt_back_merge()
1806 trace_block_bio_backmerge(q, req, bio); in bio_attempt_back_merge()
1820 bool bio_attempt_front_merge(struct request_queue *q, struct request *req, in bio_attempt_front_merge() argument
1825 if (!ll_front_merge_fn(q, req, bio)) in bio_attempt_front_merge()
1828 trace_block_bio_frontmerge(q, req, bio); in bio_attempt_front_merge()
1844 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, in bio_attempt_discard_merge() argument
1849 if (segments >= queue_max_discard_segments(q)) in bio_attempt_discard_merge()
1864 req_set_nomerge(q, req); in bio_attempt_discard_merge()
1890 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1903 if (q->mq_ops) in blk_attempt_plug_merge()
1911 if (rq->q == q) { in blk_attempt_plug_merge()
1922 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) in blk_attempt_plug_merge()
1927 merged = bio_attempt_back_merge(q, rq, bio); in blk_attempt_plug_merge()
1930 merged = bio_attempt_front_merge(q, rq, bio); in blk_attempt_plug_merge()
1933 merged = bio_attempt_discard_merge(q, rq, bio); in blk_attempt_plug_merge()
1946 unsigned int blk_plug_queued_count(struct request_queue *q) in blk_plug_queued_count() argument
1957 if (q->mq_ops) in blk_plug_queued_count()
1963 if (rq->q == q) in blk_plug_queued_count()
1985 blk_rq_bio_prep(req->q, req, bio); in blk_init_request_from_bio()
1989 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) in blk_queue_bio() argument
2001 blk_queue_bounce(q, &bio); in blk_queue_bio()
2003 blk_queue_split(q, &bio); in blk_queue_bio()
2009 spin_lock_irq(q->queue_lock); in blk_queue_bio()
2018 if (!blk_queue_nomerges(q)) { in blk_queue_bio()
2019 if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) in blk_queue_bio()
2022 request_count = blk_plug_queued_count(q); in blk_queue_bio()
2024 spin_lock_irq(q->queue_lock); in blk_queue_bio()
2026 switch (elv_merge(q, &req, bio)) { in blk_queue_bio()
2028 if (!bio_attempt_back_merge(q, req, bio)) in blk_queue_bio()
2030 elv_bio_merged(q, req, bio); in blk_queue_bio()
2031 free = attempt_back_merge(q, req); in blk_queue_bio()
2033 __blk_put_request(q, free); in blk_queue_bio()
2035 elv_merged_request(q, req, ELEVATOR_BACK_MERGE); in blk_queue_bio()
2038 if (!bio_attempt_front_merge(q, req, bio)) in blk_queue_bio()
2040 elv_bio_merged(q, req, bio); in blk_queue_bio()
2041 free = attempt_front_merge(q, req); in blk_queue_bio()
2043 __blk_put_request(q, free); in blk_queue_bio()
2045 elv_merged_request(q, req, ELEVATOR_FRONT_MERGE); in blk_queue_bio()
2052 rq_qos_throttle(q, bio, q->queue_lock); in blk_queue_bio()
2058 blk_queue_enter_live(q); in blk_queue_bio()
2059 req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO); in blk_queue_bio()
2061 blk_queue_exit(q); in blk_queue_bio()
2062 rq_qos_cleanup(q, bio); in blk_queue_bio()
2071 rq_qos_track(q, req, bio); in blk_queue_bio()
2081 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) in blk_queue_bio()
2094 trace_block_plug(q); in blk_queue_bio()
2100 trace_block_plug(q); in blk_queue_bio()
2106 spin_lock_irq(q->queue_lock); in blk_queue_bio()
2107 add_acct_request(q, req, where); in blk_queue_bio()
2108 __blk_run_queue(q); in blk_queue_bio()
2110 spin_unlock_irq(q->queue_lock); in blk_queue_bio()
2247 struct request_queue *q; in generic_make_request_checks() local
2254 q = bio->bi_disk->queue; in generic_make_request_checks()
2255 if (unlikely(!q)) { in generic_make_request_checks()
2267 if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q)) in generic_make_request_checks()
2289 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { in generic_make_request_checks()
2299 if (!blk_queue_discard(q)) in generic_make_request_checks()
2303 if (!blk_queue_secure_erase(q)) in generic_make_request_checks()
2307 if (!q->limits.max_write_same_sectors) in generic_make_request_checks()
2312 if (!blk_queue_is_zoned(q)) in generic_make_request_checks()
2316 if (!q->limits.max_write_zeroes_sectors) in generic_make_request_checks()
2329 create_io_context(GFP_ATOMIC, q->node); in generic_make_request_checks()
2331 if (!blkcg_bio_issue_check(q, bio)) in generic_make_request_checks()
2335 trace_block_bio_queue(q, bio); in generic_make_request_checks()
2386 struct request_queue *q = bio->bi_disk->queue; in generic_make_request() local
2392 blk_queue_enter_live(q); in generic_make_request()
2393 else if (blk_queue_enter(q, flags) < 0) { in generic_make_request()
2394 if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT)) in generic_make_request()
2439 if (unlikely(q != bio->bi_disk->queue)) { in generic_make_request()
2440 if (q) in generic_make_request()
2441 blk_queue_exit(q); in generic_make_request()
2442 q = bio->bi_disk->queue; in generic_make_request()
2446 if (blk_queue_enter(q, flags) < 0) { in generic_make_request()
2448 q = NULL; in generic_make_request()
2458 ret = q->make_request_fn(q, bio); in generic_make_request()
2466 if (q == bio->bi_disk->queue) in generic_make_request()
2475 if (unlikely(!blk_queue_dying(q) && in generic_make_request()
2486 if (q) in generic_make_request()
2487 blk_queue_exit(q); in generic_make_request()
2504 struct request_queue *q = bio->bi_disk->queue; in direct_make_request() local
2511 if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) { in direct_make_request()
2512 if (nowait && !blk_queue_dying(q)) in direct_make_request()
2520 ret = q->make_request_fn(q, bio); in direct_make_request()
2521 blk_queue_exit(q); in direct_make_request()
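
generic_make_request() (and its direct_make_request() variant) is where a bio finally meets q->make_request_fn, after generic_make_request_checks() has validated the target queue and blk_queue_enter() has pinned it. A minimal sketch of a caller feeding it one bio; my_end_io(), the block_device and the page are assumptions of the example:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void my_end_io(struct bio *bio)
{
        /* completion runs here; release our reference */
        bio_put(bio);
}

static void example_read_one_page(struct block_device *bdev, struct page *page)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);       /* one bvec slot */

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = 0;
        bio->bi_opf = REQ_OP_READ;
        bio->bi_end_io = my_end_io;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        generic_make_request(bio);      /* ends up in q->make_request_fn */
}
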
2570 bool blk_poll(struct request_queue *q, blk_qc_t cookie) in blk_poll() argument
2572 if (!q->poll_fn || !blk_qc_t_valid(cookie)) in blk_poll()
2577 return q->poll_fn(q, cookie); in blk_poll()
2598 static int blk_cloned_rq_check_limits(struct request_queue *q, in blk_cloned_rq_check_limits() argument
2601 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { in blk_cloned_rq_check_limits()
2613 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_cloned_rq_check_limits()
2626 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) in blk_insert_cloned_request() argument
2631 if (blk_cloned_rq_check_limits(q, rq)) in blk_insert_cloned_request()
2638 if (q->mq_ops) { in blk_insert_cloned_request()
2639 if (blk_queue_io_stat(q)) in blk_insert_cloned_request()
2649 spin_lock_irqsave(q->queue_lock, flags); in blk_insert_cloned_request()
2650 if (unlikely(blk_queue_dying(q))) { in blk_insert_cloned_request()
2651 spin_unlock_irqrestore(q->queue_lock, flags); in blk_insert_cloned_request()
2664 add_acct_request(q, rq, where); in blk_insert_cloned_request()
2666 __blk_run_queue(q); in blk_insert_cloned_request()
2667 spin_unlock_irqrestore(q->queue_lock, flags); in blk_insert_cloned_request()
2745 part_round_stats(req->q, cpu, part); in blk_account_io_done()
2746 part_dec_in_flight(req->q, part, rq_data_dir(req)); in blk_account_io_done()
2760 switch (rq->q->rpm_status) { in blk_pm_allow_request()
2805 part_round_stats(rq->q, cpu, part); in blk_account_io_start()
2806 part_inc_in_flight(rq->q, part, rw); in blk_account_io_start()
2813 static struct request *elv_next_request(struct request_queue *q) in elv_next_request() argument
2816 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); in elv_next_request()
2818 WARN_ON_ONCE(q->mq_ops); in elv_next_request()
2821 list_for_each_entry(rq, &q->queue_head, queuelist) { in elv_next_request()
2845 !queue_flush_queueable(q)) { in elv_next_request()
2849 if (unlikely(blk_queue_bypass(q)) || in elv_next_request()
2850 !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0)) in elv_next_request()
2868 struct request *blk_peek_request(struct request_queue *q) in blk_peek_request() argument
2873 lockdep_assert_held(q->queue_lock); in blk_peek_request()
2874 WARN_ON_ONCE(q->mq_ops); in blk_peek_request()
2876 while ((rq = elv_next_request(q)) != NULL) { in blk_peek_request()
2884 elv_activate_rq(q, rq); in blk_peek_request()
2892 trace_block_rq_issue(q, rq); in blk_peek_request()
2895 if (!q->boundary_rq || q->boundary_rq == rq) { in blk_peek_request()
2896 q->end_sector = rq_end_sector(rq); in blk_peek_request()
2897 q->boundary_rq = NULL; in blk_peek_request()
2903 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_peek_request()
2913 if (!q->prep_rq_fn) in blk_peek_request()
2916 ret = q->prep_rq_fn(q, rq); in blk_peek_request()
2926 if (q->dma_drain_size && blk_rq_bytes(rq) && in blk_peek_request()
2958 struct request_queue *q = rq->q; in blk_dequeue_request() local
2971 q->in_flight[rq_is_sync(rq)]++; in blk_dequeue_request()
2984 lockdep_assert_held(req->q->queue_lock); in blk_start_request()
2985 WARN_ON_ONCE(req->q->mq_ops); in blk_start_request()
2989 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) { in blk_start_request()
2995 rq_qos_issue(req->q, req); in blk_start_request()
3015 struct request *blk_fetch_request(struct request_queue *q) in blk_fetch_request() argument
3019 lockdep_assert_held(q->queue_lock); in blk_fetch_request()
3020 WARN_ON_ONCE(q->mq_ops); in blk_fetch_request()
3022 rq = blk_peek_request(q); in blk_fetch_request()
3166 if (blk_queue_add_random(rq->q)) in blk_update_bidi_request()
3184 struct request_queue *q = req->q; in blk_unprep_request() local
3187 if (q->unprep_rq_fn) in blk_unprep_request()
3188 q->unprep_rq_fn(q, req); in blk_unprep_request()
3194 struct request_queue *q = req->q; in blk_finish_request() local
3197 lockdep_assert_held(req->q->queue_lock); in blk_finish_request()
3198 WARN_ON_ONCE(q->mq_ops); in blk_finish_request()
3204 blk_queue_end_tag(q, req); in blk_finish_request()
3209 laptop_io_completion(req->q->backing_dev_info); in blk_finish_request()
3219 rq_qos_done(q, req); in blk_finish_request()
3223 __blk_put_request(req->next_rq->q, req->next_rq); in blk_finish_request()
3225 __blk_put_request(q, req); in blk_finish_request()
3250 struct request_queue *q = rq->q; in blk_end_bidi_request() local
3253 WARN_ON_ONCE(q->mq_ops); in blk_end_bidi_request()
3258 spin_lock_irqsave(q->queue_lock, flags); in blk_end_bidi_request()
3260 spin_unlock_irqrestore(q->queue_lock, flags); in blk_end_bidi_request()
3283 lockdep_assert_held(rq->q->queue_lock); in __blk_end_bidi_request()
3284 WARN_ON_ONCE(rq->q->mq_ops); in __blk_end_bidi_request()
3311 WARN_ON_ONCE(rq->q->mq_ops); in blk_end_request()
3353 lockdep_assert_held(rq->q->queue_lock); in __blk_end_request()
3354 WARN_ON_ONCE(rq->q->mq_ops); in __blk_end_request()
3373 lockdep_assert_held(rq->q->queue_lock); in __blk_end_request_all()
3374 WARN_ON_ONCE(rq->q->mq_ops); in __blk_end_request_all()
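
The __blk_end_request*() family completes requests on the legacy path and, as the lockdep assertions show, must run under q->queue_lock (the blk_end_request*() variants take the lock themselves). A minimal sketch of completing a request from a driver's interrupt path (the function name is hypothetical):

#include <linux/blkdev.h>

static void example_complete_request(struct request *rq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(rq, error);       /* completes every byte of rq */
        spin_unlock_irqrestore(q->queue_lock, flags);
}
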
3403 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, in blk_rq_bio_prep() argument
3407 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
3456 int blk_lld_busy(struct request_queue *q) in blk_lld_busy() argument
3458 if (q->lld_busy_fn) in blk_lld_busy()
3459 return q->lld_busy_fn(q); in blk_lld_busy()
3618 return !(rqa->q < rqb->q || in plug_rq_cmp()
3619 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); in plug_rq_cmp()
3628 static void queue_unplugged(struct request_queue *q, unsigned int depth, in queue_unplugged() argument
3630 __releases(q->queue_lock) in queue_unplugged()
3632 lockdep_assert_held(q->queue_lock); in queue_unplugged()
3634 trace_block_unplug(q, depth, !from_schedule); in queue_unplugged()
3637 blk_run_queue_async(q); in queue_unplugged()
3639 __blk_run_queue(q); in queue_unplugged()
3640 spin_unlock_irq(q->queue_lock); in queue_unplugged()
3687 struct request_queue *q; in blk_flush_plug_list() local
3704 q = NULL; in blk_flush_plug_list()
3710 BUG_ON(!rq->q); in blk_flush_plug_list()
3711 if (rq->q != q) { in blk_flush_plug_list()
3715 if (q) in blk_flush_plug_list()
3716 queue_unplugged(q, depth, from_schedule); in blk_flush_plug_list()
3717 q = rq->q; in blk_flush_plug_list()
3719 spin_lock_irq(q->queue_lock); in blk_flush_plug_list()
3725 if (unlikely(blk_queue_dying(q))) { in blk_flush_plug_list()
3734 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); in blk_flush_plug_list()
3736 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); in blk_flush_plug_list()
3744 if (q) in blk_flush_plug_list()
3745 queue_unplugged(q, depth, from_schedule); in blk_flush_plug_list()
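
blk_flush_plug_list() is the consumer side of plugging: requests parked on the current task's plug are sorted (plug_rq_cmp), re-added to their queues, and each queue is unplugged once per batch via queue_unplugged(). The producer side is just a start/finish pair around the submissions (the function name is hypothetical):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: batch several bios under one plug so they can be merged and
 * dispatched together when blk_finish_plug() flushes the plug list. */
static void example_plugged_submit(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                generic_make_request(bios[i]);
        blk_finish_plug(&plug);
}
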
3780 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) in blk_pm_runtime_init() argument
3783 if (q->mq_ops) { in blk_pm_runtime_init()
3788 q->dev = dev; in blk_pm_runtime_init()
3789 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
3790 pm_runtime_set_autosuspend_delay(q->dev, -1); in blk_pm_runtime_init()
3791 pm_runtime_use_autosuspend(q->dev); in blk_pm_runtime_init()
3816 int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
3820 if (!q->dev) in blk_pre_runtime_suspend()
3823 spin_lock_irq(q->queue_lock); in blk_pre_runtime_suspend()
3824 if (q->nr_pending) { in blk_pre_runtime_suspend()
3826 pm_runtime_mark_last_busy(q->dev); in blk_pre_runtime_suspend()
3828 q->rpm_status = RPM_SUSPENDING; in blk_pre_runtime_suspend()
3830 spin_unlock_irq(q->queue_lock); in blk_pre_runtime_suspend()
3848 void blk_post_runtime_suspend(struct request_queue *q, int err) in blk_post_runtime_suspend() argument
3850 if (!q->dev) in blk_post_runtime_suspend()
3853 spin_lock_irq(q->queue_lock); in blk_post_runtime_suspend()
3855 q->rpm_status = RPM_SUSPENDED; in blk_post_runtime_suspend()
3857 q->rpm_status = RPM_ACTIVE; in blk_post_runtime_suspend()
3858 pm_runtime_mark_last_busy(q->dev); in blk_post_runtime_suspend()
3860 spin_unlock_irq(q->queue_lock); in blk_post_runtime_suspend()
3875 void blk_pre_runtime_resume(struct request_queue *q) in blk_pre_runtime_resume() argument
3877 if (!q->dev) in blk_pre_runtime_resume()
3880 spin_lock_irq(q->queue_lock); in blk_pre_runtime_resume()
3881 q->rpm_status = RPM_RESUMING; in blk_pre_runtime_resume()
3882 spin_unlock_irq(q->queue_lock); in blk_pre_runtime_resume()
3900 void blk_post_runtime_resume(struct request_queue *q, int err) in blk_post_runtime_resume() argument
3902 if (!q->dev) in blk_post_runtime_resume()
3905 spin_lock_irq(q->queue_lock); in blk_post_runtime_resume()
3907 q->rpm_status = RPM_ACTIVE; in blk_post_runtime_resume()
3908 __blk_run_queue(q); in blk_post_runtime_resume()
3909 pm_runtime_mark_last_busy(q->dev); in blk_post_runtime_resume()
3910 pm_request_autosuspend(q->dev); in blk_post_runtime_resume()
3912 q->rpm_status = RPM_SUSPENDED; in blk_post_runtime_resume()
3914 spin_unlock_irq(q->queue_lock); in blk_post_runtime_resume()
3932 void blk_set_runtime_active(struct request_queue *q) in blk_set_runtime_active() argument
3934 spin_lock_irq(q->queue_lock); in blk_set_runtime_active()
3935 q->rpm_status = RPM_ACTIVE; in blk_set_runtime_active()
3936 pm_runtime_mark_last_busy(q->dev); in blk_set_runtime_active()
3937 pm_request_autosuspend(q->dev); in blk_set_runtime_active()
3938 spin_unlock_irq(q->queue_lock); in blk_set_runtime_active()
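
The runtime-PM helpers at the end of the listing pair up around the driver's own hardware suspend/resume work: blk_pm_runtime_init() is called once by the driver to enable block runtime PM for the queue (note the q->mq_ops check at the top), blk_pre/post_runtime_suspend() bracket the suspend step and refuse it while requests are pending, and blk_pre/post_runtime_resume() bracket the resume step. A minimal sketch of that wiring; struct my_pm_dev and the drvdata plumbing are assumptions of the example:

#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct my_pm_dev {                      /* hypothetical driver state */
        struct request_queue *queue;
};

static int my_runtime_suspend(struct device *dev)
{
        struct my_pm_dev *mydev = dev_get_drvdata(dev);
        int err;

        err = blk_pre_runtime_suspend(mydev->queue);
        if (err)
                return err;             /* requests still pending: stay active */

        err = 0;                        /* ... put the hardware to sleep here ... */
        blk_post_runtime_suspend(mydev->queue, err);
        return err;
}

static int my_runtime_resume(struct device *dev)
{
        struct my_pm_dev *mydev = dev_get_drvdata(dev);
        int err;

        blk_pre_runtime_resume(mydev->queue);
        err = 0;                        /* ... wake the hardware here ... */
        blk_post_runtime_resume(mydev->queue, err);
        return err;
}
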