
Searched refs:hctx (Results 1 – 25 of 57) sorted by relevance


/Linux-v5.4/block/
blk-mq-sched.c
23 struct blk_mq_hw_ctx *hctx; in blk_mq_sched_free_hctx_data() local
26 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_sched_free_hctx_data()
27 if (exit && hctx->sched_data) in blk_mq_sched_free_hctx_data()
28 exit(hctx); in blk_mq_sched_free_hctx_data()
29 kfree(hctx->sched_data); in blk_mq_sched_free_hctx_data()
30 hctx->sched_data = NULL; in blk_mq_sched_free_hctx_data()
65 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx() argument
67 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
70 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
74 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart() argument
[all …]
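The two helpers above are the scheduler-restart handshake: a hardware context that runs out of resources (tags, driver budget) is flagged with BLK_MQ_S_SCHED_RESTART, and a later completion re-runs it exactly once. A minimal sketch of that protocol, assuming only generic bitops and blk_mq_run_hw_queue():

	/* Sketch: flag a starved hctx so the next completion restarts it.
	 * Testing the bit before setting it avoids dirtying a shared
	 * cacheline when the flag is already set. */
	static void example_mark_restart(struct blk_mq_hw_ctx *hctx)
	{
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
	}

	/* Sketch: completion side; test_and_clear_bit() guarantees that
	 * only one caller actually re-runs the queue. */
	static void example_restart(struct blk_mq_hw_ctx *hctx)
	{
		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			blk_mq_run_hw_queue(hctx, true);
	}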
blk-mq.c
67 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
69 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
70 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
71 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
77 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
80 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
82 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
83 sbitmap_set_bit(&hctx->ctx_map, bit); in blk_mq_hctx_mark_pending()
86 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
89 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_clear_pending()
[all …]
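blk_mq_hctx_has_pending() reports work from any of three sources: the hctx's private dispatch list, the ctx_map bitmap of busy software contexts, and the attached elevator. The mark/clear helpers keep ctx_map in sync as requests are queued per CPU; dispatch then walks the set bits. A hedged sketch of that consumer side, loosely modeled on blk_mq_flush_busy_ctxs():

	#include <linux/sbitmap.h>

	/* Sketch: called once per set bit; bitnr indexes hctx->ctxs[].
	 * The real code splices that ctx's request list onto a local
	 * list and clears the bit under ctx->lock. */
	static bool example_drain_one(struct sbitmap *sb, unsigned int bitnr,
				      void *data)
	{
		struct blk_mq_hw_ctx *hctx = data;

		sbitmap_clear_bit(&hctx->ctx_map, bitnr);
		return true;	/* continue the walk */
	}

	static void example_drain_busy_ctxs(struct blk_mq_hw_ctx *hctx)
	{
		sbitmap_for_each_set(&hctx->ctx_map, example_drain_one, hctx);
	}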
blk-mq-sysfs.c
36 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release() local
39 cancel_delayed_work_sync(&hctx->run_work); in blk_mq_hw_sysfs_release()
41 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_hw_sysfs_release()
42 cleanup_srcu_struct(hctx->srcu); in blk_mq_hw_sysfs_release()
43 blk_free_flush_queue(hctx->fq); in blk_mq_hw_sysfs_release()
44 sbitmap_free(&hctx->ctx_map); in blk_mq_hw_sysfs_release()
45 free_cpumask_var(hctx->cpumask); in blk_mq_hw_sysfs_release()
46 kfree(hctx->ctxs); in blk_mq_hw_sysfs_release()
47 kfree(hctx); in blk_mq_hw_sysfs_release()
112 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show() local
[all …]
blk-mq-debugfs.c
221 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show() local
223 blk_flags_show(m, hctx->state, hctx_state_name, in hctx_state_show()
247 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show() local
248 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); in hctx_flags_show()
258 hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy), in hctx_flags_show()
358 __acquires(&hctx->lock) in hctx_dispatch_start()
360 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_start() local
362 spin_lock(&hctx->lock); in hctx_dispatch_start()
363 return seq_list_start(&hctx->dispatch, *pos); in hctx_dispatch_start()
368 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_next() local
[all …]
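hctx_dispatch_start()/next()/stop() form a textbook seq_file iterator over a locked list: ->start() takes hctx->lock (hence the __acquires() annotation), the iterator walks hctx->dispatch, and ->stop() releases the lock so the list cannot change mid-dump. A sketch of the full triple, assuming m->private points at the hctx:

	/* Sketch: seq_file ops holding hctx->lock across the whole walk. */
	static void *example_dispatch_start(struct seq_file *m, loff_t *pos)
		__acquires(&hctx->lock)
	{
		struct blk_mq_hw_ctx *hctx = m->private;

		spin_lock(&hctx->lock);
		return seq_list_start(&hctx->dispatch, *pos);
	}

	static void *example_dispatch_next(struct seq_file *m, void *v,
					   loff_t *pos)
	{
		struct blk_mq_hw_ctx *hctx = m->private;

		return seq_list_next(v, &hctx->dispatch, pos);
	}

	static void example_dispatch_stop(struct seq_file *m, void *v)
		__releases(&hctx->lock)
	{
		struct blk_mq_hw_ctx *hctx = m->private;

		spin_unlock(&hctx->lock);
	}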
blk-mq.h
46 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
48 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
67 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
70 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
75 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
127 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
166 struct blk_mq_hw_ctx *hctx; member
172 return data->hctx->sched_tags; in blk_mq_tags_from_data()
174 return data->hctx->tags; in blk_mq_tags_from_data()
177 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_stopped() argument
[all …]
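blk_mq_tags_from_data() decides which tag set owns a request: scheduler-allocated requests use the elevator's shadow sched_tags, everything else uses the driver tags. A hedged reconstruction of the helper around the two returns shown above, assuming the v5.4 BLK_MQ_REQ_INTERNAL condition:

	static inline struct blk_mq_tags *
	example_tags_from_data(struct blk_mq_alloc_data *data)
	{
		if (data->flags & BLK_MQ_REQ_INTERNAL)
			return data->hctx->sched_tags;

		return data->hctx->tags;
	}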
blk-mq-tag.c
32 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy() argument
34 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && in __blk_mq_tag_busy()
35 !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_busy()
36 atomic_inc(&hctx->tags->active_queues); in __blk_mq_tag_busy()
55 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle() argument
57 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle()
59 if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_idle()
71 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, in hctx_may_queue() argument
76 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) in hctx_may_queue()
78 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in hctx_may_queue()
[all …]
blk-mq-tag.h
29 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
32 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
40 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr() argument
42 if (!hctx) in bt_wait_ptr()
44 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
56 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy() argument
58 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) in blk_mq_tag_busy()
61 return __blk_mq_tag_busy(hctx); in blk_mq_tag_busy()
64 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_idle() argument
66 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) in blk_mq_tag_idle()
[all …]
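Taken together, blk-mq-tag.c and blk-mq-tag.h implement fairness for shared tag sets: the inline wrappers short-circuit when BLK_MQ_F_TAG_SHARED is off, the __blk_mq_tag_busy()/__blk_mq_tag_idle() pair maintains tags->active_queues, and hctx_may_queue() caps each active queue at roughly its even share of the depth. A sketch of that cap, close to but simplified from the v5.4 logic:

	/* Sketch: limit one queue's tag usage when the set is shared. */
	static bool example_may_queue(struct blk_mq_hw_ctx *hctx,
				      struct sbitmap_queue *bt)
	{
		unsigned int depth, users;

		if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
			return true;
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;

		users = atomic_read(&hctx->tags->active_queues);
		if (!users)
			return true;

		/* Give each active queue an even split of the depth,
		 * but never fewer than four tags. */
		depth = max((bt->sb.depth + users - 1) / users, 4U);
		return atomic_read(&hctx->nr_active) < depth;
	}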
kyber-iosched.c
461 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx() argument
463 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; in kyber_init_hctx()
467 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
471 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
473 GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
477 for (i = 0; i < hctx->nr_ctx; i++) in kyber_init_hctx()
481 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx, in kyber_init_hctx()
482 ilog2(8), GFP_KERNEL, hctx->numa_node)) { in kyber_init_hctx()
496 khd->domain_wait[i].wait.private = hctx; in kyber_init_hctx()
504 hctx->sched_data = khd; in kyber_init_hctx()
[all …]
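kyber_init_hctx() is the usual shape of an elevator's per-hctx setup: allocate private state on the hctx's NUMA node, size per-ctx arrays by hctx->nr_ctx, and publish the result in hctx->sched_data, which blk_mq_sched_free_hctx_data() later frees. A stripped-down sketch with the Kyber-specific fields replaced by a hypothetical struct:

	#include <linux/blk-mq.h>
	#include <linux/slab.h>

	struct example_sched_data {		/* hypothetical */
		unsigned int nr_ctx;
	};

	static int example_init_hctx(struct blk_mq_hw_ctx *hctx,
				     unsigned int hctx_idx)
	{
		struct example_sched_data *esd;

		/* Allocate on the hardware context's NUMA node. */
		esd = kmalloc_node(sizeof(*esd), GFP_KERNEL, hctx->numa_node);
		if (!esd)
			return -ENOMEM;

		esd->nr_ctx = hctx->nr_ctx;
		hctx->sched_data = esd;
		return 0;
	}

	static void example_exit_hctx(struct blk_mq_hw_ctx *hctx,
				      unsigned int hctx_idx)
	{
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}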
blk-mq-debugfs.h
24 struct blk_mq_hw_ctx *hctx);
25 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
32 struct blk_mq_hw_ctx *hctx);
33 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
48 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx() argument
52 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx() argument
73 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx() argument
77 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx() argument
blk-mq-sched.h
19 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
20 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
24 void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
28 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
73 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work() argument
75 struct elevator_queue *e = hctx->queue->elevator; in blk_mq_sched_has_work()
78 return e->type->ops.has_work(hctx); in blk_mq_sched_has_work()
83 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart() argument
85 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_needs_restart()
mq-deadline.c
381 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) in dd_dispatch_request() argument
383 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_dispatch_request()
462 static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, in dd_bio_merge() argument
465 struct request_queue *q = hctx->queue; in dd_bio_merge()
483 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in dd_insert_request() argument
486 struct request_queue *q = hctx->queue; in dd_insert_request()
523 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, in dd_insert_requests() argument
526 struct request_queue *q = hctx->queue; in dd_insert_requests()
535 dd_insert_request(hctx, rq, at_head); in dd_insert_requests()
578 static bool dd_has_work(struct blk_mq_hw_ctx *hctx) in dd_has_work() argument
[all …]
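All of these mq-deadline hooks receive the owning hctx and immediately climb to hctx->queue->elevator->elevator_data for the shared deadline state, which is why the scheduler needs no per-hctx data. They are wired up through the elevator ops table; a trimmed, hedged sketch of that registration (v5.4 declares more ops than shown here):

	static struct elevator_type example_mq_deadline = {
		.ops = {
			.insert_requests	= dd_insert_requests,
			.dispatch_request	= dd_dispatch_request,
			.bio_merge		= dd_bio_merge,
			.has_work		= dd_has_work,
			/* init_sched, exit_sched, merge hooks, ... */
		},
		.elevator_name	= "mq-deadline",
		.elevator_owner	= THIS_MODULE,
	};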
blk-flush.c
213 struct blk_mq_hw_ctx *hctx; in flush_end_io() local
227 hctx = flush_rq->mq_hctx; in flush_end_io()
229 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); in flush_end_io()
329 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io() local
347 blk_mq_sched_restart(hctx); in mq_flush_data_end_io()
bfq-iosched.c
2209 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, in bfq_bio_merge() argument
2212 struct request_queue *q = hctx->queue; in bfq_bio_merge()
4625 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) in bfq_has_work() argument
4627 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work()
4637 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in __bfq_dispatch_request() argument
4639 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request()
4777 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in bfq_dispatch_request() argument
4779 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_dispatch_request()
4789 rq = __bfq_dispatch_request(hctx); in bfq_dispatch_request()
4796 bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, in bfq_dispatch_request()
[all …]
blk.h
52 is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx) in is_flush_rq() argument
54 return hctx->fq->flush_rq == req; in is_flush_rq()
bsg-lib.c
261 static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx, in bsg_queue_rq() argument
264 struct request_queue *q = hctx->queue; in bsg_queue_rq()
/Linux-v5.4/include/linux/
blk-mq.h
318 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
319 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
322 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
326 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
327 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
359 #define queue_for_each_hw_ctx(q, hctx, i) \ argument
361 ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
363 #define hctx_for_each_ctx(hctx, ctx, i) \ argument
364 for ((i) = 0; (i) < (hctx)->nr_ctx && \
365 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
[all …]
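The two macros at the end define the standard traversal order: queue_for_each_hw_ctx() visits every hardware context of a queue, and hctx_for_each_ctx() visits the software contexts mapped onto one hctx, exactly as blk_mq_sched_free_hctx_data() and null_init_queues() do elsewhere in these results. A small illustrative walker (the printout is hypothetical):

	#include <linux/blk-mq.h>

	/* Sketch: report how many software contexts feed each hw queue. */
	static void example_dump_mapping(struct request_queue *q)
	{
		struct blk_mq_hw_ctx *hctx;
		struct blk_mq_ctx *ctx;
		unsigned int nr;
		int i, j;

		queue_for_each_hw_ctx(q, hctx, i) {
			nr = 0;
			hctx_for_each_ctx(hctx, ctx, j)
				nr++;
			pr_info("hctx %d: %u software contexts\n", i, nr);
		}
	}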
/Linux-v5.4/net/dccp/ccids/
ccid3.h
104 struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); in ccid3_hc_tx_sk() local
105 BUG_ON(hctx == NULL); in ccid3_hc_tx_sk()
106 return hctx; in ccid3_hc_tx_sk()
/Linux-v5.4/drivers/s390/block/
scm_blk.c
282 static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx, in scm_blk_request() argument
285 struct scm_device *scmdev = hctx->queue->queuedata; in scm_blk_request()
287 struct scm_queue *sq = hctx->driver_data; in scm_blk_request()
331 static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scm_blk_init_hctx() argument
340 hctx->driver_data = qd; in scm_blk_init_hctx()
345 static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) in scm_blk_exit_hctx() argument
347 struct scm_queue *qd = hctx->driver_data; in scm_blk_exit_hctx()
350 kfree(hctx->driver_data); in scm_blk_exit_hctx()
351 hctx->driver_data = NULL; in scm_blk_exit_hctx()
/Linux-v5.4/drivers/nvme/target/
loop.c
132 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_loop_queue_rq() argument
135 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_loop_queue_rq()
136 struct nvme_loop_queue *queue = hctx->driver_data; in nvme_loop_queue_rq()
215 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_hctx() argument
223 hctx->driver_data = queue; in nvme_loop_init_hctx()
227 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_admin_hctx() argument
235 hctx->driver_data = queue; in nvme_loop_init_admin_hctx()
/Linux-v5.4/arch/um/drivers/
ubd_kern.c
195 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
1312 static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, in ubd_queue_one_vec() argument
1315 struct ubd *dev = hctx->queue->queuedata; in ubd_queue_one_vec()
1359 static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req) in queue_rw_req() argument
1367 ret = ubd_queue_one_vec(hctx, req, off, &bvec); in queue_rw_req()
1375 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, in ubd_queue_rq() argument
1378 struct ubd *ubd_dev = hctx->queue->queuedata; in ubd_queue_rq()
1389 ret = ubd_queue_one_vec(hctx, req, 0, NULL); in ubd_queue_rq()
1393 ret = queue_rw_req(hctx, req); in ubd_queue_rq()
1397 ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL); in ubd_queue_rq()
/Linux-v5.4/drivers/block/
virtio_blk.c
257 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) in virtio_commit_rqs() argument
259 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_commit_rqs()
260 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
271 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, in virtio_queue_rq() argument
274 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_queue_rq()
279 int qid = hctx->queue_num; in virtio_queue_rq()
327 num = blk_rq_map_sg(hctx->queue, req, vbr->sg); in virtio_queue_rq()
342 blk_mq_stop_hw_queue(hctx); in virtio_queue_rq()
null_blk_main.c
1323 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, in null_queue_rq() argument
1327 struct nullb_queue *nq = hctx->driver_data; in null_queue_rq()
1331 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in null_queue_rq()
1450 struct blk_mq_hw_ctx *hctx; in null_init_queues() local
1454 queue_for_each_hw_ctx(q, hctx, i) { in null_init_queues()
1455 if (!hctx->nr_ctx || !hctx->tags) in null_init_queues()
1458 hctx->driver_data = nq; in null_init_queues()
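virtio_blk and null_blk together show the driver side of the contract: per-hw-queue driver state is hung off hctx->driver_data (at init time, or by a later fixup such as null_init_queues()), ->queue_rq() reads it back, and a full device stops the hctx rather than spinning. A hedged skeleton of those hooks; every example_ name is invented for illustration:

	#include <linux/blk-mq.h>

	struct example_queue { int id; };			/* hypothetical */
	struct example_dev { struct example_queue *queues; };	/* hypothetical */

	static bool example_hw_full(struct example_queue *eq)	/* stub */
	{
		return false;
	}

	static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				     unsigned int hctx_idx)
	{
		struct example_dev *dev = data;	/* tag_set->driver_data */

		hctx->driver_data = &dev->queues[hctx_idx];
		return 0;
	}

	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
					     const struct blk_mq_queue_data *bd)
	{
		struct example_queue *eq = hctx->driver_data;

		/* Drivers that may sleep must set BLK_MQ_F_BLOCKING. */
		might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
		blk_mq_start_request(bd->rq);

		if (example_hw_full(eq)) {
			/* The completion path restarts the stopped queue. */
			blk_mq_stop_hw_queue(hctx);
			return BLK_STS_DEV_RESOURCE;
		}

		/* ... issue bd->rq to hardware, complete asynchronously ... */
		return BLK_STS_OK;
	}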
/Linux-v5.4/drivers/nvme/host/
pci.c
372 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_admin_init_hctx() argument
379 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); in nvme_admin_init_hctx()
382 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
387 static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in nvme_admin_exit_hctx() argument
389 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_admin_exit_hctx()
394 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_init_hctx() argument
403 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); in nvme_init_hctx()
404 hctx->driver_data = nvmeq; in nvme_init_hctx()
501 static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) in nvme_commit_rqs() argument
503 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_commit_rqs()
[all …]
fc.c
32 struct blk_mq_hw_ctx *hctx; member
1836 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, in __nvme_fc_init_hctx() argument
1841 hctx->driver_data = queue; in __nvme_fc_init_hctx()
1842 queue->hctx = hctx; in __nvme_fc_init_hctx()
1846 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_fc_init_hctx() argument
1851 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); in nvme_fc_init_hctx()
1857 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_fc_init_admin_hctx() argument
1862 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); in nvme_fc_init_admin_hctx()
2321 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_fc_queue_rq() argument
2324 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_fc_queue_rq()
[all …]
/Linux-v5.4/drivers/scsi/
scsi_lib.c
1623 static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) in scsi_mq_put_budget() argument
1625 struct request_queue *q = hctx->queue; in scsi_mq_put_budget()
1631 static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) in scsi_mq_get_budget() argument
1633 struct request_queue *q = hctx->queue; in scsi_mq_get_budget()
1640 blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY); in scsi_mq_get_budget()
1644 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, in scsi_queue_rq() argument
1705 scsi_mq_put_budget(hctx); in scsi_queue_rq()
1843 static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) in scsi_commit_rqs() argument
1845 struct request_queue *q = hctx->queue; in scsi_commit_rqs()
1849 shost->hostt->commit_rqs(shost, hctx->queue_num); in scsi_commit_rqs()
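SCSI is the main consumer of the budget hooks: ->get_budget() reserves device capacity before a request is dispatched, ->put_budget() returns it when dispatch fails, and a failed reservation schedules a delayed re-run (SCSI_QUEUE_DELAY is 3 ms in v5.4) instead of busy-polling. A hedged sketch of that contract, with a hypothetical capacity counter in place of the SCSI device accounting:

	#include <linux/blk-mq.h>

	struct example_dev { atomic_t budget; };	/* hypothetical */

	static bool example_get_budget(struct blk_mq_hw_ctx *hctx)
	{
		struct example_dev *dev = hctx->queue->queuedata;

		if (atomic_dec_if_positive(&dev->budget) >= 0)
			return true;

		/* Out of capacity: re-run the queue a little later. */
		blk_mq_delay_run_hw_queue(hctx, 3 /* msecs */);
		return false;
	}

	static void example_put_budget(struct blk_mq_hw_ctx *hctx)
	{
		struct example_dev *dev = hctx->queue->queuedata;

		atomic_inc(&dev->budget);
	}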
