
Searched for refs:hctx (results 1 – 25 of 36), sorted by relevance


/Linux-v4.19/block/
blk-mq-sched.c
22 struct blk_mq_hw_ctx *hctx; in blk_mq_sched_free_hctx_data() local
25 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_sched_free_hctx_data()
26 if (exit && hctx->sched_data) in blk_mq_sched_free_hctx_data()
27 exit(hctx); in blk_mq_sched_free_hctx_data()
28 kfree(hctx->sched_data); in blk_mq_sched_free_hctx_data()
29 hctx->sched_data = NULL; in blk_mq_sched_free_hctx_data()
57 static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx() argument
59 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
62 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
65 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart() argument
[all …]
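Consolidated from the fragments above: v4.19's blk_mq_sched_free_hctx_data() walks every hardware context on a queue and releases its per-hctx scheduler data. A sketch reconstructed from the excerpt (the exit callback is the elevator's per-hctx teardown hook):

void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *))
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        /* Visit each hardware context attached to the queue. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (exit && hctx->sched_data)
                        exit(hctx);
                kfree(hctx->sched_data);
                hctx->sched_data = NULL;
        }
}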
blk-mq.c
64 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
66 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
67 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
68 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
74 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
77 if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw)) in blk_mq_hctx_mark_pending()
78 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw); in blk_mq_hctx_mark_pending()
81 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
84 sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw); in blk_mq_hctx_clear_pending()
92 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight() argument
[all …]
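The three conditions in blk_mq_hctx_has_pending() define when a hardware context has runnable work; reconstructed from the excerpt:

static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        /* Work exists if requests sit on the hctx dispatch list, any
         * mapped software context has queued I/O (one bit per ctx in
         * ctx_map), or the attached I/O scheduler reports work. */
        return !list_empty_careful(&hctx->dispatch) ||
               sbitmap_any_bit_set(&hctx->ctx_map) ||
               blk_mq_sched_has_work(hctx);
}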
blk-mq.h
38 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
40 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
59 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
62 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
67 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
89 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
132 struct blk_mq_hw_ctx *hctx; member
138 return data->hctx->sched_tags; in blk_mq_tags_from_data()
140 return data->hctx->tags; in blk_mq_tags_from_data()
143 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_stopped() argument
[all …]
blk-mq-debugfs.c
228 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show() local
230 blk_flags_show(m, hctx->state, hctx_state_name, in hctx_state_show()
255 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show() local
256 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); in hctx_flags_show()
266 hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy), in hctx_flags_show()
383 __acquires(&hctx->lock) in hctx_dispatch_start()
385 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_start() local
387 spin_lock(&hctx->lock); in hctx_dispatch_start()
388 return seq_list_start(&hctx->dispatch, *pos); in hctx_dispatch_start()
393 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_next() local
[all …]
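hctx_dispatch_start() pairs a spin_lock with the seq_file iterator so the dispatch list cannot change while debugfs walks it; the lock is dropped in the matching stop callback. Reconstructed from the excerpt:

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}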
blk-mq-sysfs.c
22 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release() local
24 free_cpumask_var(hctx->cpumask); in blk_mq_hw_sysfs_release()
25 kfree(hctx->ctxs); in blk_mq_hw_sysfs_release()
26 kfree(hctx); in blk_mq_hw_sysfs_release()
91 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show() local
96 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_show()
97 q = hctx->queue; in blk_mq_hw_sysfs_show()
105 res = entry->show(hctx, page); in blk_mq_hw_sysfs_show()
115 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_store() local
120 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_store()
[all …]
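The release path recovers the hctx from its embedded kobject with container_of() and then frees everything the hctx owns; reconstructed from the excerpt:

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_hw_ctx *hctx = container_of(kobj,
                                struct blk_mq_hw_ctx, kobj);

        /* The hctx owns its CPU mask and its software-context array. */
        free_cpumask_var(hctx->cpumask);
        kfree(hctx->ctxs);
        kfree(hctx);
}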
blk-mq-tag.c
30 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy() argument
32 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && in __blk_mq_tag_busy()
33 !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_busy()
34 atomic_inc(&hctx->tags->active_queues); in __blk_mq_tag_busy()
53 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle() argument
55 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle()
57 if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_idle()
69 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, in hctx_may_queue() argument
74 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) in hctx_may_queue()
76 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in hctx_may_queue()
[all …]
blk-mq-tag.h
29 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
32 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
40 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr() argument
42 if (!hctx) in bt_wait_ptr()
44 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
56 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy() argument
58 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) in blk_mq_tag_busy()
61 return __blk_mq_tag_busy(hctx); in blk_mq_tag_busy()
64 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_idle() argument
66 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) in blk_mq_tag_idle()
[all …]
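The inline wrappers keep the fast path cheap: shared-tag accounting only happens when BLK_MQ_F_TAG_SHARED is set, so the atomic work in __blk_mq_tag_busy()/__blk_mq_tag_idle() is skipped entirely for non-shared tag sets. Reconstructed from the excerpt:

static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return false;

        return __blk_mq_tag_busy(hctx);
}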
kyber-iosched.c
406 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx() argument
408 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; in kyber_init_hctx()
412 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
416 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
418 GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
422 for (i = 0; i < hctx->nr_ctx; i++) in kyber_init_hctx()
426 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx, in kyber_init_hctx()
427 ilog2(8), GFP_KERNEL, hctx->numa_node)) { in kyber_init_hctx()
440 khd->domain_wait[i].private = hctx; in kyber_init_hctx()
448 hctx->sched_data = khd; in kyber_init_hctx()
[all …]
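kyber_init_hctx() shows the standard elevator pattern: allocate per-hctx state on the hctx's home NUMA node and publish it through hctx->sched_data. An abridged sketch; the kcqs array, kcq_map bitmaps, and domain_wait setup from the excerpt are elided:

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct kyber_hctx_data *khd;

        /* Allocate on the hctx's NUMA node for locality. */
        khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
        if (!khd)
                return -ENOMEM;

        /* ... kcqs / kcq_map / domain_wait setup elided, see excerpt ... */

        hctx->sched_data = khd;
        return 0;
}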
blk-mq-debugfs.h
24 struct blk_mq_hw_ctx *hctx);
25 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
32 struct blk_mq_hw_ctx *hctx);
33 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
45 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx() argument
50 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx() argument
73 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx() argument
78 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx() argument
blk-mq-sched.h
18 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
26 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
78 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work() argument
80 struct elevator_queue *e = hctx->queue->elevator; in blk_mq_sched_has_work()
83 return e->type->ops.mq.has_work(hctx); in blk_mq_sched_has_work()
88 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart() argument
90 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_needs_restart()
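blk_mq_sched_has_work() guards the elevator call so queues without a scheduler, or with one that lacks the hook, fall through to false; reconstructed from the excerpt:

static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.has_work)
                return e->type->ops.mq.has_work(hctx);

        return false;
}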
blk-flush.c
231 struct blk_mq_hw_ctx *hctx; in flush_end_io() local
235 hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu); in flush_end_io()
237 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); in flush_end_io()
240 blk_mq_put_driver_tag_hctx(hctx, flush_rq); in flush_end_io()
338 struct blk_mq_hw_ctx *hctx; in blk_kick_flush() local
345 hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu); in blk_kick_flush()
346 blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq); in blk_kick_flush()
407 struct blk_mq_hw_ctx *hctx; in mq_flush_data_end_io() local
412 hctx = blk_mq_map_queue(q, ctx->cpu); in mq_flush_data_end_io()
416 blk_mq_put_driver_tag_hctx(hctx, rq); in mq_flush_data_end_io()
[all …]
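Flush handling must find the hardware context a request was issued on; in v4.19 that mapping runs from the request's software context to its CPU and back to the hctx. A hypothetical helper condensing the pattern used three times above (example_rq_to_hctx is illustrative, not in the tree):

static struct blk_mq_hw_ctx *example_rq_to_hctx(struct request *rq)
{
        /* v4.19 keys the ctx-to-hctx mapping off the submitting CPU. */
        return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
}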
mq-deadline.c
380 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) in dd_dispatch_request() argument
382 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_dispatch_request()
461 static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) in dd_bio_merge() argument
463 struct request_queue *q = hctx->queue; in dd_bio_merge()
481 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in dd_insert_request() argument
484 struct request_queue *q = hctx->queue; in dd_insert_request()
521 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, in dd_insert_requests() argument
524 struct request_queue *q = hctx->queue; in dd_insert_requests()
533 dd_insert_request(hctx, rq, at_head); in dd_insert_requests()
567 static bool dd_has_work(struct blk_mq_hw_ctx *hctx) in dd_has_work() argument
[all …]
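Every mq-deadline hook recovers its state the same way, through hctx->queue->elevator->elevator_data; dd_has_work() is the shortest example. Reconstructed under the assumption that the v4.19 fifo_list indexing holds (index 0 for reads, 1 for writes):

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
               !list_empty_careful(&dd->fifo_list[0]) ||
               !list_empty_careful(&dd->fifo_list[1]);
}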
bfq-iosched.c
1831 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) in bfq_bio_merge() argument
1833 struct request_queue *q = hctx->queue; in bfq_bio_merge()
3834 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) in bfq_has_work() argument
3836 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work()
3846 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in __bfq_dispatch_request() argument
3848 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request()
3985 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in bfq_dispatch_request() argument
3987 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_dispatch_request()
3997 rq = __bfq_dispatch_request(hctx); in bfq_dispatch_request()
4004 bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, in bfq_dispatch_request()
[all …]
/Linux-v4.19/include/linux/
blk-mq.h
267 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
268 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
271 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
275 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
276 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
319 #define queue_for_each_hw_ctx(q, hctx, i) \ argument
321 ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
323 #define hctx_for_each_ctx(hctx, ctx, i) \ argument
324 for ((i) = 0; (i) < (hctx)->nr_ctx && \
325 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
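The two macros nest naturally: the outer loop visits each hardware context, the inner one each software context mapped onto it. A hypothetical helper showing them together (example_count_ctxs is illustrative, not in the tree):

static void example_count_ctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        unsigned int n;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                n = 0;
                hctx_for_each_ctx(hctx, ctx, j)
                        n++;
                pr_info("hctx %d maps %u software ctxs\n", i, n);
        }
}

In practice hctx->nr_ctx already holds that count; the inner loop is only there to exercise both iterators.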
/Linux-v4.19/net/dccp/ccids/
ccid3.h
117 struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); in ccid3_hc_tx_sk() local
118 BUG_ON(hctx == NULL); in ccid3_hc_tx_sk()
119 return hctx; in ccid3_hc_tx_sk()
/Linux-v4.19/drivers/s390/block/
scm_blk.c
282 static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx, in scm_blk_request() argument
285 struct scm_device *scmdev = hctx->queue->queuedata; in scm_blk_request()
287 struct scm_queue *sq = hctx->driver_data; in scm_blk_request()
331 static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scm_blk_init_hctx() argument
340 hctx->driver_data = qd; in scm_blk_init_hctx()
345 static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) in scm_blk_exit_hctx() argument
347 struct scm_queue *qd = hctx->driver_data; in scm_blk_exit_hctx()
350 kfree(hctx->driver_data); in scm_blk_exit_hctx()
351 hctx->driver_data = NULL; in scm_blk_exit_hctx()
/Linux-v4.19/drivers/nvme/target/
loop.c
154 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_loop_queue_rq() argument
157 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_loop_queue_rq()
158 struct nvme_loop_queue *queue = hctx->driver_data; in nvme_loop_queue_rq()
235 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_hctx() argument
243 hctx->driver_data = queue; in nvme_loop_init_hctx()
247 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_admin_hctx() argument
255 hctx->driver_data = queue; in nvme_loop_init_admin_hctx()
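Both init_hctx variants follow the same convention: the void *data cookie passed by blk-mq is the controller, and the chosen queue is parked in hctx->driver_data for later use in queue_rq(). An abridged sketch of the I/O-queue variant (queue 0 is the admin queue, so I/O hctx_idx is offset by one; the real code also sanity-checks the index):

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                               unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;

        /* Queue 0 is the admin queue; I/O queues start at index 1. */
        hctx->driver_data = &ctrl->queues[hctx_idx + 1];
        return 0;
}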
/Linux-v4.19/drivers/nvme/host/
fc.c
43 struct blk_mq_hw_ctx *hctx; member
1801 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, in __nvme_fc_init_hctx() argument
1806 hctx->driver_data = queue; in __nvme_fc_init_hctx()
1807 queue->hctx = hctx; in __nvme_fc_init_hctx()
1811 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_fc_init_hctx() argument
1816 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); in nvme_fc_init_hctx()
1822 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_fc_init_admin_hctx() argument
1827 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); in nvme_fc_init_admin_hctx()
2259 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_fc_queue_rq() argument
2262 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_fc_queue_rq()
[all …]
pci.c
383 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_admin_init_hctx() argument
390 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); in nvme_admin_init_hctx()
393 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
398 static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in nvme_admin_exit_hctx() argument
400 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_admin_exit_hctx()
405 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_init_hctx() argument
414 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); in nvme_init_hctx()
415 hctx->driver_data = nvmeq; in nvme_init_hctx()
806 static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_queue_rq() argument
809 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_queue_rq()
[all …]
rdma.c
302 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_rdma_init_hctx() argument
310 hctx->driver_data = queue; in nvme_rdma_init_hctx()
314 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_rdma_init_admin_hctx() argument
322 hctx->driver_data = queue; in nvme_rdma_init_admin_hctx()
1687 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_rdma_queue_rq() argument
1690 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_rdma_queue_rq()
1691 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_queue_rq()
1743 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) in nvme_rdma_poll() argument
1745 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_poll()
/Linux-v4.19/drivers/block/
virtio_blk.c
217 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, in virtio_queue_rq() argument
220 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_queue_rq()
225 int qid = hctx->queue_num; in virtio_queue_rq()
259 num = blk_rq_map_sg(hctx->queue, req, vbr->sg); in virtio_queue_rq()
274 blk_mq_stop_hw_queue(hctx); in virtio_queue_rq()
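virtio_queue_rq() demonstrates the recurring driver pattern: device-wide state comes from hctx->queue->queuedata, the hardware queue index from hctx->queue_num, and a full ring stops the hctx until the completion path restarts it. A minimal hypothetical driver sketch under those assumptions (mydrv_* names are illustrative, not in the tree):

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        struct mydrv_dev *dev = hctx->queue->queuedata;  /* device-wide */
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);

        if (!mydrv_submit(dev, hctx->queue_num, rq)) {
                /* Ring full: stop this hctx; the completion handler
                 * calls blk_mq_start_stopped_hw_queues() to resume. */
                blk_mq_stop_hw_queue(hctx);
                return BLK_STS_DEV_RESOURCE;
        }

        return BLK_STS_OK;
}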
null_blk_main.c
1422 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, in null_queue_rq() argument
1426 struct nullb_queue *nq = hctx->driver_data; in null_queue_rq()
1428 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in null_queue_rq()
1546 struct blk_mq_hw_ctx *hctx; in null_init_queues() local
1550 queue_for_each_hw_ctx(q, hctx, i) { in null_init_queues()
1551 if (!hctx->nr_ctx || !hctx->tags) in null_init_queues()
1554 hctx->driver_data = nq; in null_init_queues()
/Linux-v4.19/drivers/block/mtip32xx/
mtip32xx.c
209 struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; in mtip_cmd_from_tag() local
211 return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag)); in mtip_cmd_from_tag()
2190 struct blk_mq_hw_ctx *hctx) in mtip_hw_submit_io() argument
3548 static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) in mtip_submit_request() argument
3550 struct driver_data *dd = hctx->queue->queuedata; in mtip_submit_request()
3584 nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg); in mtip_submit_request()
3587 mtip_hw_submit_io(dd, rq, cmd, nents, hctx); in mtip_submit_request()
3591 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, in mtip_check_unal_depth() argument
3594 struct driver_data *dd = hctx->queue->queuedata; in mtip_check_unal_depth()
3615 static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx, in mtip_issue_reserved_cmd() argument
[all …]
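mtip_cmd_from_tag() shows the tag-to-payload lookup: blk_mq_tag_to_rq() resolves a hardware tag to its struct request, and blk_mq_rq_to_pdu() returns the per-request driver payload allocated alongside it. Reconstructed from the excerpt:

static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
                                          unsigned int tag)
{
        struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];

        return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
}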
/Linux-v4.19/drivers/mtd/ubi/
block.c
321 static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, in ubiblock_queue_rq() argument
325 struct ubiblock *dev = hctx->queue->queuedata; in ubiblock_queue_rq()
/Linux-v4.19/drivers/scsi/
scsi_lib.c
2051 static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) in scsi_mq_put_budget() argument
2053 struct request_queue *q = hctx->queue; in scsi_mq_put_budget()
2060 static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) in scsi_mq_get_budget() argument
2062 struct request_queue *q = hctx->queue; in scsi_mq_get_budget()
2076 blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY); in scsi_mq_get_budget()
2080 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, in scsi_queue_rq() argument
2133 scsi_mq_put_budget(hctx); in scsi_queue_rq()
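The budget hooks let SCSI enforce per-device queue depth before blk-mq dispatches: get_budget reserves a slot, put_budget returns it, and a failed reservation re-arms the queue with a delay so dispatch is retried. An abridged sketch of scsi_mq_get_budget() (the real v4.19 code also consults sdev->device_busy before delaying):

static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct scsi_device *sdev = q->queuedata;

        if (scsi_dev_queue_ready(q, sdev))
                return true;

        /* No slot free: retry this hctx after a short delay. */
        blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
        return false;
}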
