
Searched refs:hctx (Results 1 – 25 of 57) sorted by relevance


/Linux-v5.15/block/
blk-mq-sched.c
51 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx() argument
53 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
56 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
60 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart() argument
62 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_restart()
64 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_restart()
75 blk_mq_run_hw_queue(hctx, true); in blk_mq_sched_restart()
89 struct blk_mq_hw_ctx *hctx = in blk_mq_dispatch_hctx_list() local
96 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
105 return blk_mq_dispatch_rq_list(hctx, &hctx_list, count); in blk_mq_dispatch_hctx_list()
[all …]
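
The mark/restart pair above is blk-mq's stall-recovery mechanism: when a dispatch attempt cannot make progress, the hctx is marked with BLK_MQ_S_SCHED_RESTART, and a later request completion calls blk_mq_sched_restart() to clear the bit and re-run the hardware queue (blk-flush.c below is a real caller). A minimal sketch of the pattern, assuming block-layer-internal context (these functions live in blk-mq-sched.h, not the driver API); the example_* functions are hypothetical:

    /* dispatch side: mark the hctx before backing off, so a
     * completion knows to re-run it */
    static void example_dispatch_stalled(struct blk_mq_hw_ctx *hctx)
    {
            blk_mq_sched_mark_restart_hctx(hctx);  /* sets BLK_MQ_S_SCHED_RESTART */
    }

    /* completion side: freed tags/resources may unblock the queue */
    static void example_end_io(struct request *rq)
    {
            blk_mq_sched_restart(rq->mq_hctx);  /* clears the bit, runs the hw queue */
    }
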
blk-mq.c
70 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
72 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
73 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
74 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
80 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
83 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
85 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
86 sbitmap_set_bit(&hctx->ctx_map, bit); in blk_mq_hctx_mark_pending()
89 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
92 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_clear_pending()
[all …]
blk-mq-sysfs.c
36 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release() local
39 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_hw_sysfs_release()
40 cleanup_srcu_struct(hctx->srcu); in blk_mq_hw_sysfs_release()
41 blk_free_flush_queue(hctx->fq); in blk_mq_hw_sysfs_release()
42 sbitmap_free(&hctx->ctx_map); in blk_mq_hw_sysfs_release()
43 free_cpumask_var(hctx->cpumask); in blk_mq_hw_sysfs_release()
44 kfree(hctx->ctxs); in blk_mq_hw_sysfs_release()
45 kfree(hctx); in blk_mq_hw_sysfs_release()
58 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show() local
63 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_show()
[all …]
blk-mq.h
43 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
47 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
48 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
69 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
73 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
78 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
130 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
160 struct blk_mq_hw_ctx *hctx; member
171 return data->hctx->sched_tags; in blk_mq_tags_from_data()
173 return data->hctx->tags; in blk_mq_tags_from_data()
[all …]
blk-mq-debugfs.c
228 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show() local
230 blk_flags_show(m, hctx->state, hctx_state_name, in hctx_state_show()
256 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show() local
257 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); in hctx_flags_show()
267 hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy), in hctx_flags_show()
363 __acquires(&hctx->lock) in hctx_dispatch_start()
365 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_start() local
367 spin_lock(&hctx->lock); in hctx_dispatch_start()
368 return seq_list_start(&hctx->dispatch, *pos); in hctx_dispatch_start()
373 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_next() local
[all …]
blk-mq-tag.c
25 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy() argument
27 if (blk_mq_is_sbitmap_shared(hctx->flags)) { in __blk_mq_tag_busy()
28 struct request_queue *q = hctx->queue; in __blk_mq_tag_busy()
35 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && in __blk_mq_tag_busy()
36 !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_busy()
37 atomic_inc(&hctx->tags->active_queues); in __blk_mq_tag_busy()
57 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle() argument
59 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle()
60 struct request_queue *q = hctx->queue; in __blk_mq_tag_idle()
63 if (blk_mq_is_sbitmap_shared(hctx->flags)) { in __blk_mq_tag_idle()
[all …]
blk-mq-tag.h
46 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
59 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr() argument
61 if (!hctx) in bt_wait_ptr()
63 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
75 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy() argument
77 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in blk_mq_tag_busy()
80 return __blk_mq_tag_busy(hctx); in blk_mq_tag_busy()
83 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_idle() argument
85 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in blk_mq_tag_idle()
88 __blk_mq_tag_idle(hctx); in blk_mq_tag_idle()
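
The blk_mq_tag_busy()/blk_mq_tag_idle() wrappers above show a common kernel idiom: a cheap inline gate in the header that tests a flag (here BLK_MQ_F_TAG_QUEUE_SHARED) and only calls the out-of-line slow path when shared-tag accounting is actually enabled. A hedged sketch of the idiom with hypothetical example_* names:

    void __example_account_busy(struct blk_mq_hw_ctx *hctx);  /* out-of-line slow path */

    /* header: fast path costs a single flag test when accounting is off */
    static inline void example_account_busy(struct blk_mq_hw_ctx *hctx)
    {
            if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                    __example_account_busy(hctx);
    }
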
kyber-iosched.c
452 static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx) in kyber_depth_updated() argument
454 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; in kyber_depth_updated()
455 struct blk_mq_tags *tags = hctx->sched_tags; in kyber_depth_updated()
463 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx() argument
468 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
472 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
474 GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
478 for (i = 0; i < hctx->nr_ctx; i++) in kyber_init_hctx()
482 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx, in kyber_init_hctx()
483 ilog2(8), GFP_KERNEL, hctx->numa_node, in kyber_init_hctx()
[all …]
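
kyber_init_hctx() above illustrates the usual pattern for per-hctx scheduler state: allocate it on the hctx's NUMA node so dispatch-time accesses stay node-local, and store it in hctx->sched_data. A hedged sketch of the same pattern; struct example_hctx_data is hypothetical:

    struct example_hctx_data {
            unsigned int nr_ctx;
    };

    static int example_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    {
            struct example_hctx_data *d;

            /* NUMA-local allocation, as kyber does with kmalloc_node() */
            d = kmalloc_node(sizeof(*d), GFP_KERNEL, hctx->numa_node);
            if (!d)
                    return -ENOMEM;
            d->nr_ctx = hctx->nr_ctx;
            hctx->sched_data = d;
            return 0;
    }
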
blk-mq-debugfs.h
24 struct blk_mq_hw_ctx *hctx);
25 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
32 struct blk_mq_hw_ctx *hctx);
33 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
48 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx() argument
52 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx() argument
73 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx() argument
77 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx() argument
blk-mq-sched.h
18 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
19 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
23 void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
27 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
72 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work() argument
74 struct elevator_queue *e = hctx->queue->elevator; in blk_mq_sched_has_work()
77 return e->type->ops.has_work(hctx); in blk_mq_sched_has_work()
82 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart() argument
84 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_needs_restart()
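
blk_mq_sched_has_work() above simply delegates to the active elevator's ops.has_work() callback, which must stay cheap because it runs in the hot queue-run path. A hedged sketch of such a callback, modeled on the bfq/mq-deadline implementations in these results; struct example_data is hypothetical:

    struct example_data {
            struct list_head pending;  /* requests not yet dispatched */
    };

    static bool example_has_work(struct blk_mq_hw_ctx *hctx)
    {
            struct example_data *ed = hctx->queue->elevator->elevator_data;

            /* lockless peek: has_work may race, dispatch re-checks under lock */
            return !list_empty_careful(&ed->pending);
    }
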
mq-deadline.c
477 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) in dd_dispatch_request() argument
479 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_dispatch_request()
514 static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) in dd_depth_updated() argument
516 struct request_queue *q = hctx->queue; in dd_depth_updated()
518 struct blk_mq_tags *tags = hctx->sched_tags; in dd_depth_updated()
526 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in dd_init_hctx() argument
528 dd_depth_updated(hctx); in dd_init_hctx()
659 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in dd_insert_request() argument
662 struct request_queue *q = hctx->queue; in dd_insert_request()
713 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, in dd_insert_requests() argument
[all …]
blk-flush.c
352 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io() local
370 blk_mq_sched_restart(hctx); in mq_flush_data_end_io()
519 void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_set_fq_lock_class() argument
522 lockdep_set_class(&hctx->fq->mq_flush_lock, key); in blk_mq_hctx_set_fq_lock_class()
bfq-iosched.c
4907 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) in bfq_has_work() argument
4909 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work()
4919 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in __bfq_dispatch_request() argument
4921 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request()
5059 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in bfq_dispatch_request() argument
5061 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_dispatch_request()
5071 rq = __bfq_dispatch_request(hctx); in bfq_dispatch_request()
5078 bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, in bfq_dispatch_request()
5967 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in bfq_insert_request() argument
5970 struct request_queue *q = hctx->queue; in bfq_insert_request()
[all …]
bsg-lib.c
265 static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx, in bsg_queue_rq() argument
268 struct request_queue *q = hctx->queue; in bsg_queue_rq()
/Linux-v5.15/include/linux/
blk-mq.h
534 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
535 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
538 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
542 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
543 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
600 #define queue_for_each_hw_ctx(q, hctx, i) \ argument
602 ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
604 #define hctx_for_each_ctx(hctx, ctx, i) \ argument
605 for ((i) = 0; (i) < (hctx)->nr_ctx && \
606 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
[all …]
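
queue_for_each_hw_ctx() above is the standard iterator over a queue's hardware contexts (the excerpt omits the macro's middle line, which bounds i by the queue's hardware-queue count, because that line does not match the search term). A short usage sketch, assuming only the exported blk-mq API declared in this header:

    /* asynchronously run every hardware queue of q */
    static void example_run_all_hw_queues(struct request_queue *q)
    {
            struct blk_mq_hw_ctx *hctx;
            int i;

            queue_for_each_hw_ctx(q, hctx, i)
                    blk_mq_run_hw_queue(hctx, true);
    }
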
/Linux-v5.15/net/dccp/ccids/
ccid3.h
104 struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); in ccid3_hc_tx_sk() local
105 BUG_ON(hctx == NULL); in ccid3_hc_tx_sk()
106 return hctx; in ccid3_hc_tx_sk()
/Linux-v5.15/drivers/s390/block/
scm_blk.c
283 static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx, in scm_blk_request() argument
286 struct scm_device *scmdev = hctx->queue->queuedata; in scm_blk_request()
288 struct scm_queue *sq = hctx->driver_data; in scm_blk_request()
332 static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scm_blk_init_hctx() argument
341 hctx->driver_data = qd; in scm_blk_init_hctx()
346 static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) in scm_blk_exit_hctx() argument
348 struct scm_queue *qd = hctx->driver_data; in scm_blk_exit_hctx()
351 kfree(hctx->driver_data); in scm_blk_exit_hctx()
352 hctx->driver_data = NULL; in scm_blk_exit_hctx()
/Linux-v5.15/drivers/block/rnbd/
rnbd-clt.c
167 if (WARN_ON(!q->hctx)) in rnbd_clt_dev_requeue()
171 blk_mq_run_hw_queue(q->hctx, true); in rnbd_clt_dev_requeue()
1117 struct blk_mq_hw_ctx *hctx, in rnbd_clt_dev_kick_mq_queue() argument
1120 struct rnbd_queue *q = hctx->driver_data; in rnbd_clt_dev_kick_mq_queue()
1123 blk_mq_delay_run_hw_queue(hctx, delay); in rnbd_clt_dev_kick_mq_queue()
1129 blk_mq_delay_run_hw_queue(hctx, 10/*ms*/); in rnbd_clt_dev_kick_mq_queue()
1132 static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx, in rnbd_queue_rq() argument
1147 rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY); in rnbd_queue_rq()
1161 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); in rnbd_queue_rq()
1171 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); in rnbd_queue_rq()
[all …]
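
rnbd_queue_rq() above shows the standard busy-handling contract of a queue_rq callback: when resources are exhausted, return BLK_STS_RESOURCE and arrange for the hardware queue to be re-run later with blk_mq_delay_run_hw_queue(), as rnbd_clt_dev_kick_mq_queue() does. A hedged sketch of the contract; example_reserve_slot()/example_send() and the 10 ms delay are illustrative:

    bool example_reserve_slot(void *transport);                /* hypothetical */
    void example_send(void *transport, struct request *rq);    /* hypothetical */

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
    {
            struct request *rq = bd->rq;

            if (!example_reserve_slot(hctx->driver_data)) {
                    /* out of transport resources: retry this hctx later */
                    blk_mq_delay_run_hw_queue(hctx, 10 /* ms */);
                    return BLK_STS_RESOURCE;
            }
            blk_mq_start_request(rq);
            example_send(hctx->driver_data, rq);
            return BLK_STS_OK;
    }
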
rnbd-clt.h
105 struct blk_mq_hw_ctx *hctx; member
/Linux-v5.15/drivers/nvme/target/
loop.c
131 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_loop_queue_rq() argument
134 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_loop_queue_rq()
135 struct nvme_loop_queue *queue = hctx->driver_data; in nvme_loop_queue_rq()
218 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_hctx() argument
232 blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key); in nvme_loop_init_hctx()
234 hctx->driver_data = queue; in nvme_loop_init_hctx()
238 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_admin_hctx() argument
246 hctx->driver_data = queue; in nvme_loop_init_admin_hctx()
/Linux-v5.15/drivers/block/
virtio_blk.c
203 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) in virtio_commit_rqs() argument
205 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_commit_rqs()
206 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
217 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, in virtio_queue_rq() argument
220 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_queue_rq()
225 int qid = hctx->queue_num; in virtio_queue_rq()
269 num = blk_rq_map_sg(hctx->queue, req, vbr->sg); in virtio_queue_rq()
285 blk_mq_stop_hw_queue(hctx); in virtio_queue_rq()
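
virtio_queue_rq() above demonstrates the other flow-control style: when the ring is full, stop the hardware queue with blk_mq_stop_hw_queue() and return BLK_STS_DEV_RESOURCE, which (unlike BLK_STS_RESOURCE) signals that the driver itself will restart the queue when resources free up; the completion interrupt does so. A hedged sketch; the example_ring type and helpers are illustrative:

    struct example_ring;                                          /* hypothetical */
    bool example_ring_full(struct example_ring *ring);            /* hypothetical */
    void example_ring_add(struct example_ring *ring, struct request *rq);

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
    {
            struct example_ring *ring = hctx->driver_data;

            if (example_ring_full(ring)) {
                    /* stopped until a completion frees a ring slot */
                    blk_mq_stop_hw_queue(hctx);
                    return BLK_STS_DEV_RESOURCE;
            }
            blk_mq_start_request(bd->rq);
            example_ring_add(ring, bd->rq);
            return BLK_STS_OK;
    }

The matching completion path then calls blk_mq_start_stopped_hw_queues(q, true), as virtio_blk's interrupt handler does.
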
/Linux-v5.15/drivers/scsi/
scsi_lib.c
1640 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, in scsi_queue_rq() argument
1786 static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx) in scsi_mq_poll() argument
1788 struct Scsi_Host *shost = hctx->driver_data; in scsi_mq_poll()
1791 return shost->hostt->mq_poll(shost, hctx->queue_num); in scsi_mq_poll()
1796 static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scsi_init_hctx() argument
1801 hctx->driver_data = shost; in scsi_init_hctx()
1877 static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) in scsi_commit_rqs() argument
1879 struct Scsi_Host *shost = hctx->driver_data; in scsi_commit_rqs()
1881 shost->hostt->commit_rqs(shost, hctx->queue_num); in scsi_commit_rqs()
/Linux-v5.15/drivers/nvme/host/
tcp.c
447 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_tcp_init_hctx() argument
453 hctx->driver_data = queue; in nvme_tcp_init_hctx()
457 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_tcp_init_admin_hctx() argument
463 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
2358 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx) in nvme_tcp_commit_rqs() argument
2360 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs()
2366 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_tcp_queue_rq() argument
2369 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2370 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq()
2435 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx) in nvme_tcp_poll() argument
[all …]
pci.c
395 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_admin_init_hctx() argument
402 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); in nvme_admin_init_hctx()
404 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
408 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_init_hctx() argument
414 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); in nvme_init_hctx()
415 hctx->driver_data = nvmeq; in nvme_init_hctx()
513 static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) in nvme_commit_rqs() argument
515 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_commit_rqs()
910 static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_queue_rq() argument
913 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_queue_rq()
[all …]
/Linux-v5.15/drivers/block/null_blk/
main.c
1477 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, in null_queue_rq() argument
1481 struct nullb_queue *nq = hctx->driver_data; in null_queue_rq()
1485 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in null_queue_rq()
1533 static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in null_exit_hctx() argument
1535 struct nullb_queue *nq = hctx->driver_data; in null_exit_hctx()
1548 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data, in null_init_hctx() argument
1551 struct nullb *nullb = hctx->queue->queuedata; in null_init_hctx()
1560 hctx->driver_data = nq; in null_init_hctx()
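
Taken together, the driver results above all follow the same hctx lifecycle: init_hctx() stashes a per-queue context in hctx->driver_data, queue_rq()/poll()/commit_rqs() retrieve it, and exit_hctx() frees it. A hedged sketch wiring that pattern into a blk_mq_ops table; struct example_queue and the example_* callbacks are illustrative, not any one driver's code:

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd);  /* as sketched earlier */

    struct example_queue {
            int qid;
    };

    static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                 unsigned int hctx_idx)
    {
            struct example_queue *eq;

            eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, hctx->numa_node);
            if (!eq)
                    return -ENOMEM;
            eq->qid = hctx_idx;
            hctx->driver_data = eq;
            return 0;
    }

    static void example_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    {
            kfree(hctx->driver_data);
            hctx->driver_data = NULL;
    }

    static const struct blk_mq_ops example_mq_ops = {
            .queue_rq  = example_queue_rq,
            .init_hctx = example_init_hctx,
            .exit_hctx = example_exit_hctx,
    };
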
