Lines matching refs:hctx — cross-reference hits for the hardware-dispatch-queue pointer hctx (struct blk_mq_hw_ctx) in block/blk-mq.c. Each entry shows the source line number, the matching code, the enclosing function, and how the identifier is used there (argument, local, or member).
67 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
69 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
70 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
71 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
77 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
80 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
82 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
83 sbitmap_set_bit(&hctx->ctx_map, bit); in blk_mq_hctx_mark_pending()
86 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
89 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_clear_pending()
91 sbitmap_clear_bit(&hctx->ctx_map, bit); in blk_mq_hctx_clear_pending()
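blk_mq_hctx_mark_pending() and blk_mq_hctx_clear_pending() above maintain hctx->ctx_map, a bitmap with one bit per software queue (ctx) mapped to this hardware queue; ctx->index_hw[hctx->type] is that ctx's bit position (assigned at line 2523 below). Marking tests the bit before setting it, so the common already-pending case skips the atomic read-modify-write and does not dirty the shared cacheline. A minimal user-space sketch of the same test-before-set pattern, assuming a plain word-sized bitmap in place of the kernel's struct sbitmap (names here are illustrative, not kernel API):

	#include <stdatomic.h>

	static _Atomic unsigned long ctx_map;

	static void mark_pending(int bit)
	{
		unsigned long mask = 1UL << bit;

		/* cheap read first; only do the atomic OR if the bit is clear */
		if (!(atomic_load_explicit(&ctx_map, memory_order_relaxed) & mask))
			atomic_fetch_or(&ctx_map, mask);
	}

	static void clear_pending(int bit)
	{
		atomic_fetch_and(&ctx_map, ~(1UL << bit));
	}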
99 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight() argument
125 static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight_rw() argument
236 struct blk_mq_hw_ctx *hctx; in blk_mq_quiesce_queue() local
242 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_quiesce_queue()
243 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_quiesce_queue()
244 synchronize_srcu(hctx->srcu); in blk_mq_quiesce_queue()
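blk_mq_quiesce_queue() must wait out any dispatch section that may still be running. Dispatch on a BLK_MQ_F_BLOCKING hardware queue runs under SRCU (see hctx_lock() at line 638 below); all other queues run under plain RCU, so a single synchronize_rcu() at the end covers them. A hedged reconstruction of the loop around the matched lines (kernel ~v5.2; details may differ by version):

	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();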
271 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters() local
274 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
275 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_wake_waiters()
276 blk_mq_tag_wakeup_all(hctx->tags, true); in blk_mq_wake_waiters()
279 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_can_queue() argument
281 return blk_mq_has_free_tags(hctx->tags); in blk_mq_can_queue()
305 if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) { in blk_mq_rq_ctx_init()
307 atomic_inc(&data->hctx->nr_active); in blk_mq_rq_ctx_init()
311 data->hctx->tags->rqs[rq->tag] = rq; in blk_mq_rq_ctx_init()
317 rq->mq_hctx = data->hctx; in blk_mq_rq_ctx_init()
377 if (likely(!data->hctx)) in blk_mq_get_request()
378 data->hctx = blk_mq_map_queue(q, data->cmd_flags, in blk_mq_get_request()
396 blk_mq_tag_busy(data->hctx); in blk_mq_get_request()
418 data->hctx->queued++; in blk_mq_get_request()
474 alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; in blk_mq_alloc_request_hctx()
475 if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) { in blk_mq_alloc_request_hctx()
479 cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask); in blk_mq_alloc_request_hctx()
496 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request() local
502 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
504 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag); in __blk_mq_free_request()
505 blk_mq_sched_restart(hctx); in __blk_mq_free_request()
514 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request() local
527 atomic_dec(&hctx->nr_active); in blk_mq_free_request()
629 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) in hctx_unlock() argument
630 __releases(hctx->srcu) in hctx_unlock()
632 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) in hctx_unlock()
635 srcu_read_unlock(hctx->srcu, srcu_idx); in hctx_unlock()
638 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) in hctx_lock() argument
639 __acquires(hctx->srcu) in hctx_lock()
641 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { in hctx_lock()
646 *srcu_idx = srcu_read_lock(hctx->srcu); in hctx_lock()
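hctx_lock()/hctx_unlock() are the read-side halves of the quiesce mechanism above: non-blocking hardware queues take a plain RCU read lock, while BLK_MQ_F_BLOCKING queues (drivers that may sleep in ->queue_rq()) take an SRCU read lock and carry its index in srcu_idx. Reconstructed full bodies (kernel ~v5.2), for context:

	static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
		__releases(hctx->srcu)
	{
		if (!(hctx->flags & BLK_MQ_F_BLOCKING))
			rcu_read_unlock();
		else
			srcu_read_unlock(hctx->srcu, srcu_idx);
	}

	static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
		__acquires(hctx->srcu)
	{
		if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
			/* srcu_idx is unused on the RCU path; zero it for gcc */
			*srcu_idx = 0;
			rcu_read_lock();
		} else
			*srcu_idx = srcu_read_lock(hctx->srcu);
	}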
828 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, in blk_mq_rq_inflight() argument
835 if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { in blk_mq_rq_inflight()
889 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, in blk_mq_check_expired() argument
922 if (is_flush_rq(rq, hctx)) in blk_mq_check_expired()
935 struct blk_mq_hw_ctx *hctx; in blk_mq_timeout_work() local
965 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_timeout_work()
967 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_timeout_work()
968 blk_mq_tag_idle(hctx); in blk_mq_timeout_work()
975 struct blk_mq_hw_ctx *hctx; member
982 struct blk_mq_hw_ctx *hctx = flush_data->hctx; in flush_busy_ctx() local
983 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in flush_busy_ctx()
984 enum hctx_type type = hctx->type; in flush_busy_ctx()
997 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) in blk_mq_flush_busy_ctxs() argument
1000 .hctx = hctx, in blk_mq_flush_busy_ctxs()
1004 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); in blk_mq_flush_busy_ctxs()
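blk_mq_flush_busy_ctxs() walks every set bit in hctx->ctx_map and splices each busy software queue's request list onto the caller's list, clearing the pending bit as it goes. Reconstructed callback and wrapper (kernel ~v5.2):

	static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
	{
		struct flush_busy_ctx_data *flush_data = data;
		struct blk_mq_hw_ctx *hctx = flush_data->hctx;
		struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
		enum hctx_type type = hctx->type;

		spin_lock(&ctx->lock);
		list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
		sbitmap_clear_bit(sb, bitnr);
		spin_unlock(&ctx->lock);
		return true;
	}

	void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
	{
		struct flush_busy_ctx_data data = {
			.hctx = hctx,
			.list = list,
		};

		sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
	}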
1009 struct blk_mq_hw_ctx *hctx; member
1017 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; in dispatch_rq_from_ctx() local
1018 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in dispatch_rq_from_ctx()
1019 enum hctx_type type = hctx->type; in dispatch_rq_from_ctx()
1033 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_dequeue_from_ctx() argument
1036 unsigned off = start ? start->index_hw[hctx->type] : 0; in blk_mq_dequeue_from_ctx()
1038 .hctx = hctx, in blk_mq_dequeue_from_ctx()
1042 __sbitmap_for_each_set(&hctx->ctx_map, off, in blk_mq_dequeue_from_ctx()
1060 .hctx = rq->mq_hctx, in blk_mq_get_driver_tag()
1069 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) in blk_mq_get_driver_tag()
1072 shared = blk_mq_tag_busy(data.hctx); in blk_mq_get_driver_tag()
1077 atomic_inc(&data.hctx->nr_active); in blk_mq_get_driver_tag()
1079 data.hctx->tags->rqs[rq->tag] = rq; in blk_mq_get_driver_tag()
1089 struct blk_mq_hw_ctx *hctx; in blk_mq_dispatch_wake() local
1091 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); in blk_mq_dispatch_wake()
1093 spin_lock(&hctx->dispatch_wait_lock); in blk_mq_dispatch_wake()
1098 sbq = &hctx->tags->bitmap_tags; in blk_mq_dispatch_wake()
1101 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_dispatch_wake()
1103 blk_mq_run_hw_queue(hctx, true); in blk_mq_dispatch_wake()
1113 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, in blk_mq_mark_tag_wait() argument
1116 struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; in blk_mq_mark_tag_wait()
1121 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) { in blk_mq_mark_tag_wait()
1122 blk_mq_sched_mark_restart_hctx(hctx); in blk_mq_mark_tag_wait()
1135 wait = &hctx->dispatch_wait; in blk_mq_mark_tag_wait()
1139 wq = &bt_wait_ptr(sbq, hctx)->wait; in blk_mq_mark_tag_wait()
1142 spin_lock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1144 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1160 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1171 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
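blk_mq_mark_tag_wait() handles tag exhaustion on shared-tag queues: it puts hctx->dispatch_wait on the sbitmap_queue's waitqueue and only then, under dispatch_wait_lock, retries the tag allocation. The ordering closes the race with a concurrent tag release: the waiter is visible before the final retry, so either the retry wins or the releaser's wakeup finds the waiter. An illustrative user-space analogue of that ordering (names and types are ours, not kernel API; the waitlist flag stands in for the waitqueue entry):

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic int free_tags;
	static _Atomic bool on_waitlist;

	static bool try_get_tag(void)
	{
		int old = atomic_load(&free_tags);

		while (old > 0)
			if (atomic_compare_exchange_weak(&free_tags, &old, old - 1))
				return true;
		return false;
	}

	static bool mark_tag_wait(void)
	{
		if (try_get_tag())
			return true;

		atomic_store(&on_waitlist, true);	/* 1: publish the waiter */

		if (try_get_tag()) {			/* 2: retry after publishing */
			atomic_store(&on_waitlist, false);
			return true;
		}
		return false;	/* stay queued; the release side wakes us */
	}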
1186 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) in blk_mq_update_dispatch_busy() argument
1190 if (hctx->queue->elevator) in blk_mq_update_dispatch_busy()
1193 ewma = hctx->dispatch_busy; in blk_mq_update_dispatch_busy()
1203 hctx->dispatch_busy = ewma; in blk_mq_update_dispatch_busy()
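hctx->dispatch_busy is an exponentially weighted moving average of recent dispatch outcomes, maintained only when no elevator is attached; a nonzero value disables the direct-issue fast path (see line 2045 below). The stock constants are BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT = 8 and BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR = 4 (four fractional bits). The update, reduced to its arithmetic:

	#define EWMA_WEIGHT	8	/* BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT */
	#define EWMA_FACTOR	4	/* BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR */

	static unsigned int ewma_update(unsigned int ewma, _Bool busy)
	{
		if (!ewma && !busy)
			return 0;	/* idle queue stays at zero, skip the math */

		ewma *= EWMA_WEIGHT - 1;
		if (busy)
			ewma += 1 << EWMA_FACTOR;	/* pull toward 16 */
		ewma /= EWMA_WEIGHT;			/* decay toward 0 */
		return ewma;
	}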
1214 struct blk_mq_hw_ctx *hctx; in blk_mq_dispatch_rq_list() local
1234 hctx = rq->mq_hctx; in blk_mq_dispatch_rq_list()
1235 if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) in blk_mq_dispatch_rq_list()
1246 if (!blk_mq_mark_tag_wait(hctx, rq)) { in blk_mq_dispatch_rq_list()
1247 blk_mq_put_dispatch_budget(hctx); in blk_mq_dispatch_rq_list()
1252 if (hctx->flags & BLK_MQ_F_TAG_SHARED) in blk_mq_dispatch_rq_list()
1273 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_dispatch_rq_list()
1298 hctx->dispatched[queued_to_index(queued)]++; in blk_mq_dispatch_rq_list()
1313 q->mq_ops->commit_rqs(hctx); in blk_mq_dispatch_rq_list()
1315 spin_lock(&hctx->lock); in blk_mq_dispatch_rq_list()
1316 list_splice_init(list, &hctx->dispatch); in blk_mq_dispatch_rq_list()
1317 spin_unlock(&hctx->lock); in blk_mq_dispatch_rq_list()
1343 needs_restart = blk_mq_sched_needs_restart(hctx); in blk_mq_dispatch_rq_list()
1345 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) in blk_mq_dispatch_rq_list()
1346 blk_mq_run_hw_queue(hctx, true); in blk_mq_dispatch_rq_list()
1348 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); in blk_mq_dispatch_rq_list()
1350 blk_mq_update_dispatch_busy(hctx, true); in blk_mq_dispatch_rq_list()
1353 blk_mq_update_dispatch_busy(hctx, false); in blk_mq_dispatch_rq_list()
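When blk_mq_dispatch_rq_list() ends with requests still on the list, it parks them on hctx->dispatch (lines 1315-1317 above) and then decides how the queue gets rerun. A hedged reconstruction of that decision (kernel ~v5.2): rerun immediately only if no RESTART is pending, or if we failed to get a driver tag but our dispatch_wait entry has already raced off the waitqueue (its wakeup may have fired before the requests were re-queued); for a plain BLK_STS_RESOURCE with a restart pending, poll again after a delay instead.

	needs_restart = blk_mq_sched_needs_restart(hctx);
	if (!needs_restart ||
	    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
		blk_mq_run_hw_queue(hctx, true);
	else if (needs_restart && (ret == BLK_STS_RESOURCE))
		blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

	blk_mq_update_dispatch_busy(hctx, true);
	return false;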
1365 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) in __blk_mq_run_hw_queue() argument
1386 if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && in __blk_mq_run_hw_queue()
1387 cpu_online(hctx->next_cpu)) { in __blk_mq_run_hw_queue()
1390 cpumask_empty(hctx->cpumask) ? "inactive": "active"); in __blk_mq_run_hw_queue()
1400 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in __blk_mq_run_hw_queue()
1402 hctx_lock(hctx, &srcu_idx); in __blk_mq_run_hw_queue()
1403 blk_mq_sched_dispatch_requests(hctx); in __blk_mq_run_hw_queue()
1404 hctx_unlock(hctx, srcu_idx); in __blk_mq_run_hw_queue()
1407 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_first_mapped_cpu() argument
1409 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); in blk_mq_first_mapped_cpu()
1412 cpu = cpumask_first(hctx->cpumask); in blk_mq_first_mapped_cpu()
1422 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_next_cpu() argument
1425 int next_cpu = hctx->next_cpu; in blk_mq_hctx_next_cpu()
1427 if (hctx->queue->nr_hw_queues == 1) in blk_mq_hctx_next_cpu()
1430 if (--hctx->next_cpu_batch <= 0) { in blk_mq_hctx_next_cpu()
1432 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, in blk_mq_hctx_next_cpu()
1435 next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_hctx_next_cpu()
1436 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_hctx_next_cpu()
1453 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
1454 hctx->next_cpu_batch = 1; in blk_mq_hctx_next_cpu()
1458 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
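blk_mq_hctx_next_cpu() spreads the hardware queue's run_work over its mapped CPUs, but in batches: the same CPU is reused for BLK_MQ_CPU_WORK_BATCH runs before advancing, which keeps the worker cache-warm instead of ping-ponging on every run. A runnable user-space sketch of the round-robin-with-batch idea, assuming a 64-bit CPU mask (struct and names are ours; the kernel version additionally skips offline CPUs and falls back to a batch of 1 when its chosen CPU went offline, lines 1453-1454 above):

	#define WORK_BATCH 8	/* BLK_MQ_CPU_WORK_BATCH in the kernel */

	struct hwq {
		unsigned long long cpumask;	/* bit n set: CPU n is mapped */
		int next_cpu;
		int next_cpu_batch;
	};

	static int next_set_bit(unsigned long long mask, int from)
	{
		for (int cpu = from; cpu < 64; cpu++)
			if (mask & (1ULL << cpu))
				return cpu;
		return -1;		/* ran past the end: caller wraps */
	}

	static int hwq_next_cpu(struct hwq *h)
	{
		int next = h->next_cpu;

		if (--h->next_cpu_batch <= 0) {
			next = next_set_bit(h->cpumask, next + 1);
			if (next < 0)
				next = next_set_bit(h->cpumask, 0);
			h->next_cpu_batch = WORK_BATCH;
		}
		h->next_cpu = next;
		return next;
	}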
1462 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, in __blk_mq_delay_run_hw_queue() argument
1465 if (unlikely(blk_mq_hctx_stopped(hctx))) in __blk_mq_delay_run_hw_queue()
1468 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { in __blk_mq_delay_run_hw_queue()
1470 if (cpumask_test_cpu(cpu, hctx->cpumask)) { in __blk_mq_delay_run_hw_queue()
1471 __blk_mq_run_hw_queue(hctx); in __blk_mq_delay_run_hw_queue()
1479 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, in __blk_mq_delay_run_hw_queue()
1483 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) in blk_mq_delay_run_hw_queue() argument
1485 __blk_mq_delay_run_hw_queue(hctx, true, msecs); in blk_mq_delay_run_hw_queue()
1489 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_run_hw_queue() argument
1502 hctx_lock(hctx, &srcu_idx); in blk_mq_run_hw_queue()
1503 need_run = !blk_queue_quiesced(hctx->queue) && in blk_mq_run_hw_queue()
1504 blk_mq_hctx_has_pending(hctx); in blk_mq_run_hw_queue()
1505 hctx_unlock(hctx, srcu_idx); in blk_mq_run_hw_queue()
1508 __blk_mq_delay_run_hw_queue(hctx, async, 0); in blk_mq_run_hw_queue()
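blk_mq_run_hw_queue() samples "not quiesced and has pending work" inside hctx_lock()/hctx_unlock() so the check is ordered against blk_mq_quiesce_queue(), then kicks the actual run outside the lock. Reconstructed body (kernel ~v5.2):

	bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
	{
		int srcu_idx;
		bool need_run;

		hctx_lock(hctx, &srcu_idx);
		need_run = !blk_queue_quiesced(hctx->queue) &&
			blk_mq_hctx_has_pending(hctx);
		hctx_unlock(hctx, srcu_idx);

		if (need_run) {
			__blk_mq_delay_run_hw_queue(hctx, async, 0);
			return true;
		}

		return false;
	}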
1518 struct blk_mq_hw_ctx *hctx; in blk_mq_run_hw_queues() local
1521 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
1522 if (blk_mq_hctx_stopped(hctx)) in blk_mq_run_hw_queues()
1525 blk_mq_run_hw_queue(hctx, async); in blk_mq_run_hw_queues()
1539 struct blk_mq_hw_ctx *hctx; in blk_mq_queue_stopped() local
1542 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_queue_stopped()
1543 if (blk_mq_hctx_stopped(hctx)) in blk_mq_queue_stopped()
1559 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_stop_hw_queue() argument
1561 cancel_delayed_work(&hctx->run_work); in blk_mq_stop_hw_queue()
1563 set_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_stop_hw_queue()
1578 struct blk_mq_hw_ctx *hctx; in blk_mq_stop_hw_queues() local
1581 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
1582 blk_mq_stop_hw_queue(hctx); in blk_mq_stop_hw_queues()
1586 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_start_hw_queue() argument
1588 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_hw_queue()
1590 blk_mq_run_hw_queue(hctx, false); in blk_mq_start_hw_queue()
1596 struct blk_mq_hw_ctx *hctx; in blk_mq_start_hw_queues() local
1599 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
1600 blk_mq_start_hw_queue(hctx); in blk_mq_start_hw_queues()
1604 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_start_stopped_hw_queue() argument
1606 if (!blk_mq_hctx_stopped(hctx)) in blk_mq_start_stopped_hw_queue()
1609 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_stopped_hw_queue()
1610 blk_mq_run_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queue()
1616 struct blk_mq_hw_ctx *hctx; in blk_mq_start_stopped_hw_queues() local
1619 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_stopped_hw_queues()
1620 blk_mq_start_stopped_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queues()
1626 struct blk_mq_hw_ctx *hctx; in blk_mq_run_work_fn() local
1628 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); in blk_mq_run_work_fn()
1633 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) in blk_mq_run_work_fn()
1636 __blk_mq_run_hw_queue(hctx); in blk_mq_run_work_fn()
1639 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, in __blk_mq_insert_req_list() argument
1644 enum hctx_type type = hctx->type; in __blk_mq_insert_req_list()
1648 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_req_list()
1656 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in __blk_mq_insert_request() argument
1663 __blk_mq_insert_req_list(hctx, rq, at_head); in __blk_mq_insert_request()
1664 blk_mq_hctx_mark_pending(hctx, ctx); in __blk_mq_insert_request()
1673 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_bypass_insert() local
1675 spin_lock(&hctx->lock); in blk_mq_request_bypass_insert()
1676 list_add_tail(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1677 spin_unlock(&hctx->lock); in blk_mq_request_bypass_insert()
1680 blk_mq_run_hw_queue(hctx, false); in blk_mq_request_bypass_insert()
1683 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, in blk_mq_insert_requests() argument
1688 enum hctx_type type = hctx->type; in blk_mq_insert_requests()
1696 trace_block_rq_insert(hctx->queue, rq); in blk_mq_insert_requests()
1701 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_insert_requests()
1790 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, in __blk_mq_issue_directly() argument
1802 new_cookie = request_to_qc_t(hctx, rq); in __blk_mq_issue_directly()
1809 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_issue_directly()
1812 blk_mq_update_dispatch_busy(hctx, false); in __blk_mq_issue_directly()
1817 blk_mq_update_dispatch_busy(hctx, true); in __blk_mq_issue_directly()
1821 blk_mq_update_dispatch_busy(hctx, false); in __blk_mq_issue_directly()
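__blk_mq_issue_directly() feeds the dispatch_busy EWMA above from the driver's ->queue_rq() result: success and hard errors count as not busy, while out-of-resource statuses count as busy and requeue the request. Hedged reconstruction of the result handling (kernel ~v5.2):

	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		blk_mq_update_dispatch_busy(hctx, false);
		*cookie = new_cookie;
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		blk_mq_update_dispatch_busy(hctx, true);
		__blk_mq_requeue_request(rq);
		break;
	default:
		blk_mq_update_dispatch_busy(hctx, false);
		*cookie = BLK_QC_T_NONE;
		break;
	}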
1829 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, in __blk_mq_try_issue_directly() argument
1844 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { in __blk_mq_try_issue_directly()
1853 if (!blk_mq_get_dispatch_budget(hctx)) in __blk_mq_try_issue_directly()
1857 blk_mq_put_dispatch_budget(hctx); in __blk_mq_try_issue_directly()
1861 return __blk_mq_issue_directly(hctx, rq, cookie, last); in __blk_mq_try_issue_directly()
1870 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, in blk_mq_try_issue_directly() argument
1876 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in blk_mq_try_issue_directly()
1878 hctx_lock(hctx, &srcu_idx); in blk_mq_try_issue_directly()
1880 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); in blk_mq_try_issue_directly()
1886 hctx_unlock(hctx, srcu_idx); in blk_mq_try_issue_directly()
1894 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_issue_directly() local
1896 hctx_lock(hctx, &srcu_idx); in blk_mq_request_issue_directly()
1897 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); in blk_mq_request_issue_directly()
1898 hctx_unlock(hctx, srcu_idx); in blk_mq_request_issue_directly()
1903 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, in blk_mq_try_issue_list_directly() argument
1929 if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs) in blk_mq_try_issue_list_directly()
1930 hctx->queue->mq_ops->commit_rqs(hctx); in blk_mq_try_issue_list_directly()
1986 cookie = request_to_qc_t(data.hctx, rq); in blk_mq_make_request()
1994 blk_mq_run_hw_queue(data.hctx, true); in blk_mq_make_request()
2039 data.hctx = same_queue_rq->mq_hctx; in blk_mq_make_request()
2041 blk_mq_try_issue_directly(data.hctx, same_queue_rq, in blk_mq_make_request()
2045 !data.hctx->dispatch_busy) { in blk_mq_make_request()
2046 blk_mq_try_issue_directly(data.hctx, rq, &cookie); in blk_mq_make_request()
2235 struct blk_mq_hw_ctx *hctx; in blk_mq_hctx_notify_dead() local
2240 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); in blk_mq_hctx_notify_dead()
2241 ctx = __blk_mq_get_ctx(hctx->queue, cpu); in blk_mq_hctx_notify_dead()
2242 type = hctx->type; in blk_mq_hctx_notify_dead()
2247 blk_mq_hctx_clear_pending(hctx, ctx); in blk_mq_hctx_notify_dead()
2254 spin_lock(&hctx->lock); in blk_mq_hctx_notify_dead()
2255 list_splice_tail_init(&tmp, &hctx->dispatch); in blk_mq_hctx_notify_dead()
2256 spin_unlock(&hctx->lock); in blk_mq_hctx_notify_dead()
2258 blk_mq_run_hw_queue(hctx, true); in blk_mq_hctx_notify_dead()
2262 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) in blk_mq_remove_cpuhp() argument
2265 &hctx->cpuhp_dead); in blk_mq_remove_cpuhp()
2271 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in blk_mq_exit_hctx() argument
2273 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_exit_hctx()
2274 blk_mq_tag_idle(hctx); in blk_mq_exit_hctx()
2277 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); in blk_mq_exit_hctx()
2280 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_exit_hctx()
2282 blk_mq_remove_cpuhp(hctx); in blk_mq_exit_hctx()
2285 list_add(&hctx->hctx_list, &q->unused_hctx_list); in blk_mq_exit_hctx()
2292 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_hw_queues() local
2295 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
2298 blk_mq_debugfs_unregister_hctx(hctx); in blk_mq_exit_hw_queues()
2299 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
2319 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) in blk_mq_init_hctx() argument
2321 hctx->queue_num = hctx_idx; in blk_mq_init_hctx()
2323 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); in blk_mq_init_hctx()
2325 hctx->tags = set->tags[hctx_idx]; in blk_mq_init_hctx()
2328 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) in blk_mq_init_hctx()
2331 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, in blk_mq_init_hctx()
2332 hctx->numa_node)) in blk_mq_init_hctx()
2338 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_init_hctx()
2340 blk_mq_remove_cpuhp(hctx); in blk_mq_init_hctx()
2348 struct blk_mq_hw_ctx *hctx; in blk_mq_alloc_hctx() local
2351 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); in blk_mq_alloc_hctx()
2352 if (!hctx) in blk_mq_alloc_hctx()
2355 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) in blk_mq_alloc_hctx()
2358 atomic_set(&hctx->nr_active, 0); in blk_mq_alloc_hctx()
2361 hctx->numa_node = node; in blk_mq_alloc_hctx()
2363 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); in blk_mq_alloc_hctx()
2364 spin_lock_init(&hctx->lock); in blk_mq_alloc_hctx()
2365 INIT_LIST_HEAD(&hctx->dispatch); in blk_mq_alloc_hctx()
2366 hctx->queue = q; in blk_mq_alloc_hctx()
2367 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; in blk_mq_alloc_hctx()
2369 INIT_LIST_HEAD(&hctx->hctx_list); in blk_mq_alloc_hctx()
2375 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), in blk_mq_alloc_hctx()
2377 if (!hctx->ctxs) in blk_mq_alloc_hctx()
2380 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), in blk_mq_alloc_hctx()
2383 hctx->nr_ctx = 0; in blk_mq_alloc_hctx()
2385 spin_lock_init(&hctx->dispatch_wait_lock); in blk_mq_alloc_hctx()
2386 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); in blk_mq_alloc_hctx()
2387 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); in blk_mq_alloc_hctx()
2389 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size, in blk_mq_alloc_hctx()
2391 if (!hctx->fq) in blk_mq_alloc_hctx()
2394 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_alloc_hctx()
2395 init_srcu_struct(hctx->srcu); in blk_mq_alloc_hctx()
2396 blk_mq_hctx_kobj_init(hctx); in blk_mq_alloc_hctx()
2398 return hctx; in blk_mq_alloc_hctx()
2401 sbitmap_free(&hctx->ctx_map); in blk_mq_alloc_hctx()
2403 kfree(hctx->ctxs); in blk_mq_alloc_hctx()
2405 free_cpumask_var(hctx->cpumask); in blk_mq_alloc_hctx()
2407 kfree(hctx); in blk_mq_alloc_hctx()
2420 struct blk_mq_hw_ctx *hctx; in blk_mq_init_cpu_queues() local
2435 hctx = blk_mq_map_queue_type(q, j, i); in blk_mq_init_cpu_queues()
2436 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) in blk_mq_init_cpu_queues()
2437 hctx->numa_node = local_memory_node(cpu_to_node(i)); in blk_mq_init_cpu_queues()
2474 struct blk_mq_hw_ctx *hctx; in blk_mq_map_swqueue() local
2478 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
2479 cpumask_clear(hctx->cpumask); in blk_mq_map_swqueue()
2480 hctx->nr_ctx = 0; in blk_mq_map_swqueue()
2481 hctx->dispatch_from = NULL; in blk_mq_map_swqueue()
2511 hctx = blk_mq_map_queue_type(q, j, i); in blk_mq_map_swqueue()
2512 ctx->hctxs[j] = hctx; in blk_mq_map_swqueue()
2518 if (cpumask_test_cpu(i, hctx->cpumask)) in blk_mq_map_swqueue()
2521 cpumask_set_cpu(i, hctx->cpumask); in blk_mq_map_swqueue()
2522 hctx->type = j; in blk_mq_map_swqueue()
2523 ctx->index_hw[hctx->type] = hctx->nr_ctx; in blk_mq_map_swqueue()
2524 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
2530 BUG_ON(!hctx->nr_ctx); in blk_mq_map_swqueue()
2538 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
2543 if (!hctx->nr_ctx) { in blk_mq_map_swqueue()
2551 hctx->tags = NULL; in blk_mq_map_swqueue()
2555 hctx->tags = set->tags[i]; in blk_mq_map_swqueue()
2556 WARN_ON(!hctx->tags); in blk_mq_map_swqueue()
2563 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); in blk_mq_map_swqueue()
2568 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_map_swqueue()
2569 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_map_swqueue()
2579 struct blk_mq_hw_ctx *hctx; in queue_set_hctx_shared() local
2582 queue_for_each_hw_ctx(q, hctx, i) { in queue_set_hctx_shared()
2584 hctx->flags |= BLK_MQ_F_TAG_SHARED; in queue_set_hctx_shared()
2586 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; in queue_set_hctx_shared()
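queue_set_hctx_shared() flips BLK_MQ_F_TAG_SHARED on every hardware queue of one request_queue when its tag set starts or stops being shared; the flag is what makes blk_mq_rq_ctx_init() and blk_mq_get_driver_tag() account nr_active (lines 305-307 and 1072-1077 above) for fair tag sharing. Reconstructed body (kernel ~v5.2):

	static void queue_set_hctx_shared(struct request_queue *q, bool shared)
	{
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i) {
			if (shared)
				hctx->flags |= BLK_MQ_F_TAG_SHARED;
			else
				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
		}
	}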
2677 struct blk_mq_hw_ctx *hctx, *next; in blk_mq_release() local
2680 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_release()
2681 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); in blk_mq_release()
2684 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { in blk_mq_release()
2685 list_del_init(&hctx->hctx_list); in blk_mq_release()
2686 kobject_put(&hctx->kobj); in blk_mq_release()
2756 struct blk_mq_hw_ctx *hctx = NULL, *tmp; in blk_mq_alloc_and_init_hctx() local
2762 hctx = tmp; in blk_mq_alloc_and_init_hctx()
2766 if (hctx) in blk_mq_alloc_and_init_hctx()
2767 list_del_init(&hctx->hctx_list); in blk_mq_alloc_and_init_hctx()
2770 if (!hctx) in blk_mq_alloc_and_init_hctx()
2771 hctx = blk_mq_alloc_hctx(q, set, node); in blk_mq_alloc_and_init_hctx()
2772 if (!hctx) in blk_mq_alloc_and_init_hctx()
2775 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) in blk_mq_alloc_and_init_hctx()
2778 return hctx; in blk_mq_alloc_and_init_hctx()
2781 kobject_put(&hctx->kobj); in blk_mq_alloc_and_init_hctx()
2796 struct blk_mq_hw_ctx *hctx; in blk_mq_realloc_hw_ctxs() local
2807 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); in blk_mq_realloc_hw_ctxs()
2808 if (hctx) { in blk_mq_realloc_hw_ctxs()
2811 hctxs[i] = hctx; in blk_mq_realloc_hw_ctxs()
2835 struct blk_mq_hw_ctx *hctx = hctxs[j]; in blk_mq_realloc_hw_ctxs() local
2837 if (hctx) { in blk_mq_realloc_hw_ctxs()
2838 if (hctx->tags) in blk_mq_realloc_hw_ctxs()
2840 blk_mq_exit_hctx(q, set, hctx, j); in blk_mq_realloc_hw_ctxs()
3145 struct blk_mq_hw_ctx *hctx; in blk_mq_update_nr_requests() local
3158 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
3159 if (!hctx->tags) in blk_mq_update_nr_requests()
3165 if (!hctx->sched_tags) { in blk_mq_update_nr_requests()
3166 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, in blk_mq_update_nr_requests()
3169 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, in blk_mq_update_nr_requests()
3175 q->elevator->type->ops.depth_updated(hctx); in blk_mq_update_nr_requests()
3363 struct blk_mq_hw_ctx *hctx, in blk_mq_poll_nsecs() argument
3396 struct blk_mq_hw_ctx *hctx, in blk_mq_poll_hybrid_sleep() argument
3416 nsecs = blk_mq_poll_nsecs(q, hctx, rq); in blk_mq_poll_hybrid_sleep()
3450 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) in blk_mq_poll_hybrid() argument
3458 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll_hybrid()
3460 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll_hybrid()
3471 return blk_mq_poll_hybrid_sleep(q, hctx, rq); in blk_mq_poll_hybrid()
3488 struct blk_mq_hw_ctx *hctx; in blk_poll() local
3498 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; in blk_poll()
3507 if (blk_mq_poll_hybrid(q, hctx, cookie)) in blk_poll()
3510 hctx->poll_considered++; in blk_poll()
3516 hctx->poll_invoked++; in blk_poll()
3518 ret = q->mq_ops->poll(hctx); in blk_poll()
3520 hctx->poll_success++; in blk_poll()