Lines matching refs: hctx

Identifier cross-reference for hctx (the hardware dispatch context, struct blk_mq_hw_ctx). Each entry gives the source line number, the matching line, the enclosing function, and whether hctx appears there as an argument, a local, or a struct member. The functions listed all appear to belong to the Linux block layer's blk-mq core (block/blk-mq.c).

64 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)  in blk_mq_hctx_has_pending()  argument
66 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
67 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
68 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
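
The three fragments above are the entire helper: a hardware queue has pending work if its dispatch list, any software queue flagged in ctx_map, or the attached I/O scheduler has work. Pieced back together as a sketch (only the body lines come from the listing; the surrounding context is assumed):

    static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
    {
            /* requests parked on the hctx dispatch list ... */
            return !list_empty_careful(&hctx->dispatch) ||
                    /* ... or a software queue marked busy in the bitmap ... */
                    sbitmap_any_bit_set(&hctx->ctx_map) ||
                    /* ... or work queued in the elevator */
                    blk_mq_sched_has_work(hctx);
    }
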
74 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
77 if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw)) in blk_mq_hctx_mark_pending()
78 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw); in blk_mq_hctx_mark_pending()
81 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
84 sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw); in blk_mq_hctx_clear_pending()
92 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight() argument
118 static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight_rw() argument
228 struct blk_mq_hw_ctx *hctx; in blk_mq_quiesce_queue() local
234 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_quiesce_queue()
235 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_quiesce_queue()
236 synchronize_srcu(hctx->srcu); in blk_mq_quiesce_queue()
263 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters() local
266 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
267 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_wake_waiters()
268 blk_mq_tag_wakeup_all(hctx->tags, true); in blk_mq_wake_waiters()
271 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_can_queue() argument
273 return blk_mq_has_free_tags(hctx->tags); in blk_mq_can_queue()
288 if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) { in blk_mq_rq_ctx_init()
290 atomic_inc(&data->hctx->nr_active); in blk_mq_rq_ctx_init()
294 data->hctx->tags->rqs[rq->tag] = rq; in blk_mq_rq_ctx_init()
354 if (likely(!data->hctx)) in blk_mq_get_request()
355 data->hctx = blk_mq_map_queue(q, data->ctx->cpu); in blk_mq_get_request()
371 blk_mq_tag_busy(data->hctx); in blk_mq_get_request()
395 data->hctx->queued++; in blk_mq_get_request()
453 alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; in blk_mq_alloc_request_hctx()
454 if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) { in blk_mq_alloc_request_hctx()
458 cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask); in blk_mq_alloc_request_hctx()
475 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); in __blk_mq_free_request() local
479 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
481 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag); in __blk_mq_free_request()
482 blk_mq_sched_restart(hctx); in __blk_mq_free_request()
491 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); in blk_mq_free_request() local
504 atomic_dec(&hctx->nr_active); in blk_mq_free_request()
588 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) in hctx_unlock() argument
589 __releases(hctx->srcu) in hctx_unlock()
591 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) in hctx_unlock()
594 srcu_read_unlock(hctx->srcu, srcu_idx); in hctx_unlock()
597 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) in hctx_lock() argument
598 __acquires(hctx->srcu) in hctx_lock()
600 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { in hctx_lock()
605 *srcu_idx = srcu_read_lock(hctx->srcu); in hctx_lock()
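
hctx_lock()/hctx_unlock() are the dispatch-side guard pair: drivers flagged BLK_MQ_F_BLOCKING are protected by per-hctx SRCU (the lines shown above), everything else by plain RCU. A sketch of the pair; the non-blocking branch does not appear in the listing and is assumed here:

    static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
            __releases(hctx->srcu)
    {
            if (!(hctx->flags & BLK_MQ_F_BLOCKING))
                    rcu_read_unlock();              /* assumed non-blocking path */
            else
                    srcu_read_unlock(hctx->srcu, srcu_idx);
    }

    static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
            __acquires(hctx->srcu)
    {
            if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                    *srcu_idx = 0;                  /* assumed: keep srcu_idx initialized */
                    rcu_read_lock();
            } else
                    *srcu_idx = srcu_read_lock(hctx->srcu);
    }
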
807 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, in blk_mq_check_expired() argument
848 struct blk_mq_hw_ctx *hctx; in blk_mq_timeout_work() local
878 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_timeout_work()
880 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_timeout_work()
881 blk_mq_tag_idle(hctx); in blk_mq_timeout_work()
888 struct blk_mq_hw_ctx *hctx; member
895 struct blk_mq_hw_ctx *hctx = flush_data->hctx; in flush_busy_ctx() local
896 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in flush_busy_ctx()
909 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) in blk_mq_flush_busy_ctxs() argument
912 .hctx = hctx, in blk_mq_flush_busy_ctxs()
916 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); in blk_mq_flush_busy_ctxs()
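
flush_busy_ctx() and blk_mq_flush_busy_ctxs() form a callback pair over the hctx ctx_map bitmap: every set bit names a busy software queue, whose requests are moved onto the caller's list. Sketch; only the struct members, the ctx lookup, and the sbitmap walk come from the listing, while the locking and list splicing inside the callback are assumed:

    struct flush_busy_ctx_data {
            struct blk_mq_hw_ctx    *hctx;
            struct list_head        *list;
    };

    static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
    {
            struct flush_busy_ctx_data *flush_data = data;
            struct blk_mq_hw_ctx *hctx = flush_data->hctx;
            struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

            /* assumed: drain the software queue and clear its busy bit */
            spin_lock(&ctx->lock);
            list_splice_tail_init(&ctx->rq_list, flush_data->list);
            sbitmap_clear_bit(sb, bitnr);
            spin_unlock(&ctx->lock);
            return true;    /* keep walking the remaining set bits */
    }

    void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
    {
            struct flush_busy_ctx_data data = {
                    .hctx   = hctx,
                    .list   = list,
            };

            sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
    }
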
921 struct blk_mq_hw_ctx *hctx; member
929 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; in dispatch_rq_from_ctx() local
930 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in dispatch_rq_from_ctx()
944 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_dequeue_from_ctx() argument
949 .hctx = hctx, in blk_mq_dequeue_from_ctx()
953 __sbitmap_for_each_set(&hctx->ctx_map, off, in blk_mq_dequeue_from_ctx()
971 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), in blk_mq_get_driver_tag()
979 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) in blk_mq_get_driver_tag()
982 shared = blk_mq_tag_busy(data.hctx); in blk_mq_get_driver_tag()
987 atomic_inc(&data.hctx->nr_active); in blk_mq_get_driver_tag()
989 data.hctx->tags->rqs[rq->tag] = rq; in blk_mq_get_driver_tag()
999 struct blk_mq_hw_ctx *hctx; in blk_mq_dispatch_wake() local
1001 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); in blk_mq_dispatch_wake()
1003 spin_lock(&hctx->dispatch_wait_lock); in blk_mq_dispatch_wake()
1005 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_dispatch_wake()
1007 blk_mq_run_hw_queue(hctx, true); in blk_mq_dispatch_wake()
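
blk_mq_dispatch_wake() is the wait-queue callback installed on hctx->dispatch_wait when driver tags run out: once a tag is freed, it removes the wait entry and re-runs the hardware queue. Sketch; the signature, the list removal, and the return value are assumed, while only the container_of(), the lock pair, and the queue run are in the listing:

    static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
                                    int flags, void *key)
    {
            struct blk_mq_hw_ctx *hctx;

            hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

            spin_lock(&hctx->dispatch_wait_lock);
            list_del_init(&wait->entry);            /* assumed: drop the wait entry */
            spin_unlock(&hctx->dispatch_wait_lock);

            blk_mq_run_hw_queue(hctx, true);
            return 1;
    }
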
1017 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, in blk_mq_mark_tag_wait() argument
1024 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) { in blk_mq_mark_tag_wait()
1025 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_mark_tag_wait()
1026 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_mark_tag_wait()
1039 wait = &hctx->dispatch_wait; in blk_mq_mark_tag_wait()
1043 wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait; in blk_mq_mark_tag_wait()
1046 spin_lock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1048 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1063 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1073 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1088 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) in blk_mq_update_dispatch_busy() argument
1092 if (hctx->queue->elevator) in blk_mq_update_dispatch_busy()
1095 ewma = hctx->dispatch_busy; in blk_mq_update_dispatch_busy()
1105 hctx->dispatch_busy = ewma; in blk_mq_update_dispatch_busy()
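
blk_mq_update_dispatch_busy() keeps hctx->dispatch_busy as a small integer EWMA of recent dispatch outcomes and is skipped whenever an elevator is attached (the scheduler then paces dispatch instead). Sketch with assumed weight/shift constants; the listing only shows the elevator check, the read, and the write-back:

    #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8     /* assumed value */
    #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4     /* assumed value */

    static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
    {
            unsigned int ewma;

            if (hctx->queue->elevator)
                    return;

            ewma = hctx->dispatch_busy;

            if (!ewma && !busy)
                    return;

            /* assumed: decay the old value and mix in the new sample */
            ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
            if (busy)
                    ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
            ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

            hctx->dispatch_busy = ewma;
    }
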
1116 struct blk_mq_hw_ctx *hctx; in blk_mq_dispatch_rq_list() local
1136 hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); in blk_mq_dispatch_rq_list()
1137 if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) in blk_mq_dispatch_rq_list()
1148 if (!blk_mq_mark_tag_wait(hctx, rq)) { in blk_mq_dispatch_rq_list()
1149 blk_mq_put_dispatch_budget(hctx); in blk_mq_dispatch_rq_list()
1154 if (hctx->flags & BLK_MQ_F_TAG_SHARED) in blk_mq_dispatch_rq_list()
1175 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_dispatch_rq_list()
1200 hctx->dispatched[queued_to_index(queued)]++; in blk_mq_dispatch_rq_list()
1209 spin_lock(&hctx->lock); in blk_mq_dispatch_rq_list()
1210 list_splice_init(list, &hctx->dispatch); in blk_mq_dispatch_rq_list()
1211 spin_unlock(&hctx->lock); in blk_mq_dispatch_rq_list()
1237 needs_restart = blk_mq_sched_needs_restart(hctx); in blk_mq_dispatch_rq_list()
1239 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) in blk_mq_dispatch_rq_list()
1240 blk_mq_run_hw_queue(hctx, true); in blk_mq_dispatch_rq_list()
1242 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); in blk_mq_dispatch_rq_list()
1244 blk_mq_update_dispatch_busy(hctx, true); in blk_mq_dispatch_rq_list()
1247 blk_mq_update_dispatch_busy(hctx, false); in blk_mq_dispatch_rq_list()
1259 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) in __blk_mq_run_hw_queue() argument
1280 if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && in __blk_mq_run_hw_queue()
1281 cpu_online(hctx->next_cpu)) { in __blk_mq_run_hw_queue()
1284 cpumask_empty(hctx->cpumask) ? "inactive": "active"); in __blk_mq_run_hw_queue()
1294 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in __blk_mq_run_hw_queue()
1296 hctx_lock(hctx, &srcu_idx); in __blk_mq_run_hw_queue()
1297 blk_mq_sched_dispatch_requests(hctx); in __blk_mq_run_hw_queue()
1298 hctx_unlock(hctx, srcu_idx); in __blk_mq_run_hw_queue()
1301 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_first_mapped_cpu() argument
1303 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); in blk_mq_first_mapped_cpu()
1306 cpu = cpumask_first(hctx->cpumask); in blk_mq_first_mapped_cpu()
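
blk_mq_first_mapped_cpu() prefers an online CPU from the hctx mask and only falls back to the first mapped CPU (possibly offline) when none is online. Sketch; the bounds check between the two lookups is assumed:

    static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
    {
            int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

            if (cpu >= nr_cpu_ids)          /* assumed: no mapped CPU is online */
                    cpu = cpumask_first(hctx->cpumask);
            return cpu;
    }
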
1316 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_next_cpu() argument
1319 int next_cpu = hctx->next_cpu; in blk_mq_hctx_next_cpu()
1321 if (hctx->queue->nr_hw_queues == 1) in blk_mq_hctx_next_cpu()
1324 if (--hctx->next_cpu_batch <= 0) { in blk_mq_hctx_next_cpu()
1326 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, in blk_mq_hctx_next_cpu()
1329 next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_hctx_next_cpu()
1330 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_hctx_next_cpu()
1347 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
1348 hctx->next_cpu_batch = 1; in blk_mq_hctx_next_cpu()
1352 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
1356 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, in __blk_mq_delay_run_hw_queue() argument
1359 if (unlikely(blk_mq_hctx_stopped(hctx))) in __blk_mq_delay_run_hw_queue()
1362 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { in __blk_mq_delay_run_hw_queue()
1364 if (cpumask_test_cpu(cpu, hctx->cpumask)) { in __blk_mq_delay_run_hw_queue()
1365 __blk_mq_run_hw_queue(hctx); in __blk_mq_delay_run_hw_queue()
1373 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, in __blk_mq_delay_run_hw_queue()
1377 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) in blk_mq_delay_run_hw_queue() argument
1379 __blk_mq_delay_run_hw_queue(hctx, true, msecs); in blk_mq_delay_run_hw_queue()
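
The __blk_mq_delay_run_hw_queue() entries show the two ways a queue run is driven: synchronously on the current CPU when it belongs to the hctx and the driver does not block, otherwise deferred to kblockd on the CPU picked by blk_mq_hctx_next_cpu(). Sketch of that decision; the get_cpu()/put_cpu() pinning around the inline path is assumed:

    static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
                                            unsigned long msecs)
    {
            if (unlikely(blk_mq_hctx_stopped(hctx)))
                    return;

            if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
                    int cpu = get_cpu();            /* assumed: pin the current CPU */

                    if (cpumask_test_cpu(cpu, hctx->cpumask)) {
                            __blk_mq_run_hw_queue(hctx);
                            put_cpu();
                            return;
                    }
                    put_cpu();
            }

            kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
                                        msecs_to_jiffies(msecs));
    }
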
1383 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_run_hw_queue() argument
1396 hctx_lock(hctx, &srcu_idx); in blk_mq_run_hw_queue()
1397 need_run = !blk_queue_quiesced(hctx->queue) && in blk_mq_run_hw_queue()
1398 blk_mq_hctx_has_pending(hctx); in blk_mq_run_hw_queue()
1399 hctx_unlock(hctx, srcu_idx); in blk_mq_run_hw_queue()
1402 __blk_mq_delay_run_hw_queue(hctx, async, 0); in blk_mq_run_hw_queue()
1412 struct blk_mq_hw_ctx *hctx; in blk_mq_run_hw_queues() local
1415 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
1416 if (blk_mq_hctx_stopped(hctx)) in blk_mq_run_hw_queues()
1419 blk_mq_run_hw_queue(hctx, async); in blk_mq_run_hw_queues()
1433 struct blk_mq_hw_ctx *hctx; in blk_mq_queue_stopped() local
1436 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_queue_stopped()
1437 if (blk_mq_hctx_stopped(hctx)) in blk_mq_queue_stopped()
1453 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_stop_hw_queue() argument
1455 cancel_delayed_work(&hctx->run_work); in blk_mq_stop_hw_queue()
1457 set_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_stop_hw_queue()
1472 struct blk_mq_hw_ctx *hctx; in blk_mq_stop_hw_queues() local
1475 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
1476 blk_mq_stop_hw_queue(hctx); in blk_mq_stop_hw_queues()
1480 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_start_hw_queue() argument
1482 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_hw_queue()
1484 blk_mq_run_hw_queue(hctx, false); in blk_mq_start_hw_queue()
1490 struct blk_mq_hw_ctx *hctx; in blk_mq_start_hw_queues() local
1493 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
1494 blk_mq_start_hw_queue(hctx); in blk_mq_start_hw_queues()
1498 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_start_stopped_hw_queue() argument
1500 if (!blk_mq_hctx_stopped(hctx)) in blk_mq_start_stopped_hw_queue()
1503 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_stopped_hw_queue()
1504 blk_mq_run_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queue()
1510 struct blk_mq_hw_ctx *hctx; in blk_mq_start_stopped_hw_queues() local
1513 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_stopped_hw_queues()
1514 blk_mq_start_stopped_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queues()
1520 struct blk_mq_hw_ctx *hctx; in blk_mq_run_work_fn() local
1522 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); in blk_mq_run_work_fn()
1527 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) in blk_mq_run_work_fn()
1530 __blk_mq_run_hw_queue(hctx); in blk_mq_run_work_fn()
1533 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, in __blk_mq_insert_req_list() argument
1541 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_req_list()
1549 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in __blk_mq_insert_request() argument
1556 __blk_mq_insert_req_list(hctx, rq, at_head); in __blk_mq_insert_request()
1557 blk_mq_hctx_mark_pending(hctx, ctx); in __blk_mq_insert_request()
1567 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); in blk_mq_request_bypass_insert() local
1569 spin_lock(&hctx->lock); in blk_mq_request_bypass_insert()
1570 list_add_tail(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1571 spin_unlock(&hctx->lock); in blk_mq_request_bypass_insert()
1574 blk_mq_run_hw_queue(hctx, false); in blk_mq_request_bypass_insert()
1577 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, in blk_mq_insert_requests() argument
1589 trace_block_rq_insert(hctx->queue, rq); in blk_mq_insert_requests()
1594 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_insert_requests()
1666 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) in request_to_qc_t() argument
1669 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false); in request_to_qc_t()
1671 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); in request_to_qc_t()
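
request_to_qc_t() packs the hardware queue number together with either the driver tag or the scheduler's internal tag into the blk_qc_t polling cookie. Sketch; the tag-validity test is assumed:

    static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
    {
            if (rq->tag != -1)      /* assumed: request already holds a driver tag */
                    return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

            return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
    }
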
1674 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, in __blk_mq_issue_directly() argument
1686 new_cookie = request_to_qc_t(hctx, rq); in __blk_mq_issue_directly()
1693 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_issue_directly()
1696 blk_mq_update_dispatch_busy(hctx, false); in __blk_mq_issue_directly()
1701 blk_mq_update_dispatch_busy(hctx, true); in __blk_mq_issue_directly()
1705 blk_mq_update_dispatch_busy(hctx, false); in __blk_mq_issue_directly()
1713 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, in __blk_mq_try_issue_directly() argument
1728 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { in __blk_mq_try_issue_directly()
1737 if (!blk_mq_get_dispatch_budget(hctx)) in __blk_mq_try_issue_directly()
1741 blk_mq_put_dispatch_budget(hctx); in __blk_mq_try_issue_directly()
1745 return __blk_mq_issue_directly(hctx, rq, cookie); in __blk_mq_try_issue_directly()
1754 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, in blk_mq_try_issue_directly() argument
1760 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in blk_mq_try_issue_directly()
1762 hctx_lock(hctx, &srcu_idx); in blk_mq_try_issue_directly()
1764 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); in blk_mq_try_issue_directly()
1770 hctx_unlock(hctx, srcu_idx); in blk_mq_try_issue_directly()
1779 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); in blk_mq_request_issue_directly() local
1781 hctx_lock(hctx, &srcu_idx); in blk_mq_request_issue_directly()
1782 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true); in blk_mq_request_issue_directly()
1783 hctx_unlock(hctx, srcu_idx); in blk_mq_request_issue_directly()
1788 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, in blk_mq_try_issue_list_directly() argument
1848 cookie = request_to_qc_t(data.hctx, rq); in blk_mq_make_request()
1857 blk_mq_run_hw_queue(data.hctx, true); in blk_mq_make_request()
1904 data.hctx = blk_mq_map_queue(q, in blk_mq_make_request()
1906 blk_mq_try_issue_directly(data.hctx, same_queue_rq, in blk_mq_make_request()
1910 !data.hctx->dispatch_busy)) { in blk_mq_make_request()
1913 blk_mq_try_issue_directly(data.hctx, rq, &cookie); in blk_mq_make_request()
2104 struct blk_mq_hw_ctx *hctx; in blk_mq_hctx_notify_dead() local
2108 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); in blk_mq_hctx_notify_dead()
2109 ctx = __blk_mq_get_ctx(hctx->queue, cpu); in blk_mq_hctx_notify_dead()
2114 blk_mq_hctx_clear_pending(hctx, ctx); in blk_mq_hctx_notify_dead()
2121 spin_lock(&hctx->lock); in blk_mq_hctx_notify_dead()
2122 list_splice_tail_init(&tmp, &hctx->dispatch); in blk_mq_hctx_notify_dead()
2123 spin_unlock(&hctx->lock); in blk_mq_hctx_notify_dead()
2125 blk_mq_run_hw_queue(hctx, true); in blk_mq_hctx_notify_dead()
2129 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) in blk_mq_remove_cpuhp() argument
2132 &hctx->cpuhp_dead); in blk_mq_remove_cpuhp()
2138 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in blk_mq_exit_hctx() argument
2140 blk_mq_debugfs_unregister_hctx(hctx); in blk_mq_exit_hctx()
2142 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_exit_hctx()
2143 blk_mq_tag_idle(hctx); in blk_mq_exit_hctx()
2146 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); in blk_mq_exit_hctx()
2149 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_exit_hctx()
2151 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_exit_hctx()
2152 cleanup_srcu_struct(hctx->srcu); in blk_mq_exit_hctx()
2154 blk_mq_remove_cpuhp(hctx); in blk_mq_exit_hctx()
2155 blk_free_flush_queue(hctx->fq); in blk_mq_exit_hctx()
2156 sbitmap_free(&hctx->ctx_map); in blk_mq_exit_hctx()
2162 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_hw_queues() local
2165 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
2168 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
2174 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) in blk_mq_init_hctx() argument
2178 node = hctx->numa_node; in blk_mq_init_hctx()
2180 node = hctx->numa_node = set->numa_node; in blk_mq_init_hctx()
2182 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); in blk_mq_init_hctx()
2183 spin_lock_init(&hctx->lock); in blk_mq_init_hctx()
2184 INIT_LIST_HEAD(&hctx->dispatch); in blk_mq_init_hctx()
2185 hctx->queue = q; in blk_mq_init_hctx()
2186 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; in blk_mq_init_hctx()
2188 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); in blk_mq_init_hctx()
2190 hctx->tags = set->tags[hctx_idx]; in blk_mq_init_hctx()
2196 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), in blk_mq_init_hctx()
2198 if (!hctx->ctxs) in blk_mq_init_hctx()
2201 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL, in blk_mq_init_hctx()
2205 hctx->nr_ctx = 0; in blk_mq_init_hctx()
2207 spin_lock_init(&hctx->dispatch_wait_lock); in blk_mq_init_hctx()
2208 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); in blk_mq_init_hctx()
2209 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); in blk_mq_init_hctx()
2212 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) in blk_mq_init_hctx()
2215 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); in blk_mq_init_hctx()
2216 if (!hctx->fq) in blk_mq_init_hctx()
2219 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node)) in blk_mq_init_hctx()
2222 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_init_hctx()
2223 init_srcu_struct(hctx->srcu); in blk_mq_init_hctx()
2225 blk_mq_debugfs_register_hctx(q, hctx); in blk_mq_init_hctx()
2230 kfree(hctx->fq); in blk_mq_init_hctx()
2233 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_init_hctx()
2235 sbitmap_free(&hctx->ctx_map); in blk_mq_init_hctx()
2237 kfree(hctx->ctxs); in blk_mq_init_hctx()
2239 blk_mq_remove_cpuhp(hctx); in blk_mq_init_hctx()
2250 struct blk_mq_hw_ctx *hctx; in blk_mq_init_cpu_queues() local
2261 hctx = blk_mq_map_queue(q, i); in blk_mq_init_cpu_queues()
2262 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) in blk_mq_init_cpu_queues()
2263 hctx->numa_node = local_memory_node(cpu_to_node(i)); in blk_mq_init_cpu_queues()
2299 struct blk_mq_hw_ctx *hctx; in blk_mq_map_swqueue() local
2308 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
2309 cpumask_clear(hctx->cpumask); in blk_mq_map_swqueue()
2310 hctx->nr_ctx = 0; in blk_mq_map_swqueue()
2311 hctx->dispatch_from = NULL; in blk_mq_map_swqueue()
2334 hctx = blk_mq_map_queue(q, i); in blk_mq_map_swqueue()
2336 cpumask_set_cpu(i, hctx->cpumask); in blk_mq_map_swqueue()
2337 ctx->index_hw = hctx->nr_ctx; in blk_mq_map_swqueue()
2338 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
2343 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
2348 if (!hctx->nr_ctx) { in blk_mq_map_swqueue()
2356 hctx->tags = NULL; in blk_mq_map_swqueue()
2360 hctx->tags = set->tags[i]; in blk_mq_map_swqueue()
2361 WARN_ON(!hctx->tags); in blk_mq_map_swqueue()
2368 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); in blk_mq_map_swqueue()
2373 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_map_swqueue()
2374 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_map_swqueue()
2384 struct blk_mq_hw_ctx *hctx; in queue_set_hctx_shared() local
2387 queue_for_each_hw_ctx(q, hctx, i) { in queue_set_hctx_shared()
2389 hctx->flags |= BLK_MQ_F_TAG_SHARED; in queue_set_hctx_shared()
2391 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; in queue_set_hctx_shared()
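
queue_set_hctx_shared() simply propagates the shared-tags state to every hardware queue of the request queue. Sketch; the function signature is assumed, while the loop and both flag updates are in the listing:

    static void queue_set_hctx_shared(struct request_queue *q, bool shared)
    {
            struct blk_mq_hw_ctx *hctx;
            int i;

            queue_for_each_hw_ctx(q, hctx, i) {
                    if (shared)
                            hctx->flags |= BLK_MQ_F_TAG_SHARED;
                    else
                            hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
            }
    }
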
2456 struct blk_mq_hw_ctx *hctx; in blk_mq_release() local
2460 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_release()
2461 if (!hctx) in blk_mq_release()
2463 kobject_put(&hctx->kobj); in blk_mq_release()
2551 struct blk_mq_hw_ctx *hctx = hctxs[j]; in blk_mq_realloc_hw_ctxs() local
2553 if (hctx) { in blk_mq_realloc_hw_ctxs()
2554 if (hctx->tags) in blk_mq_realloc_hw_ctxs()
2556 blk_mq_exit_hctx(q, set, hctx, j); in blk_mq_realloc_hw_ctxs()
2557 kobject_put(&hctx->kobj); in blk_mq_realloc_hw_ctxs()
2855 struct blk_mq_hw_ctx *hctx; in blk_mq_update_nr_requests() local
2865 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
2866 if (!hctx->tags) in blk_mq_update_nr_requests()
2872 if (!hctx->sched_tags) { in blk_mq_update_nr_requests()
2873 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, in blk_mq_update_nr_requests()
2876 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, in blk_mq_update_nr_requests()
3048 struct blk_mq_hw_ctx *hctx, in blk_mq_poll_nsecs() argument
3081 struct blk_mq_hw_ctx *hctx, in blk_mq_poll_hybrid_sleep() argument
3104 nsecs = blk_mq_poll_nsecs(q, hctx, rq); in blk_mq_poll_hybrid_sleep()
3138 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq) in __blk_mq_poll() argument
3140 struct request_queue *q = hctx->queue; in __blk_mq_poll()
3150 if (blk_mq_poll_hybrid_sleep(q, hctx, rq)) in __blk_mq_poll()
3153 hctx->poll_considered++; in __blk_mq_poll()
3159 hctx->poll_invoked++; in __blk_mq_poll()
3161 ret = q->mq_ops->poll(hctx, rq->tag); in __blk_mq_poll()
3163 hctx->poll_success++; in __blk_mq_poll()
3184 struct blk_mq_hw_ctx *hctx; in blk_mq_poll() local
3190 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; in blk_mq_poll()
3192 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll()
3194 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll()
3205 return __blk_mq_poll(hctx, rq); in blk_mq_poll()