Searched refs:blk_mq_hw_ctx (Results 1 – 25 of 35) sorted by relevance

/Linux-v4.19/block/
blk-mq.h
38 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
40 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
59 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
62 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
67 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
75 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, in blk_mq_map_queue()
89 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
132 struct blk_mq_hw_ctx *hctx;
143 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_stopped()
148 static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) in blk_mq_hw_queue_mapped()
[all …]
blk-mq-debugfs.h
24 struct blk_mq_hw_ctx *hctx);
25 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
32 struct blk_mq_hw_ctx *hctx);
33 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
45 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx()
50 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx()
73 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx()
78 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx()
blk-mq-tag.h
29 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
32 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
40 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr()
53 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
54 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
56 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy()
64 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_idle()
78 static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx, in blk_mq_tag_set_rq()
blk-mq-sysfs.c
22 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release()
37 ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
38 ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
91 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show()
96 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_show()
115 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_store()
120 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_store()
134 static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, in blk_mq_hw_sysfs_nr_tags_show()
140 static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, in blk_mq_hw_sysfs_nr_reserved_tags_show()
146 static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) in blk_mq_hw_sysfs_cpus_show()
[all …]
blk-mq-sched.c
20 void (*exit)(struct blk_mq_hw_ctx *)) in blk_mq_sched_free_hctx_data() argument
22 struct blk_mq_hw_ctx *hctx; in blk_mq_sched_free_hctx_data()
57 static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx()
65 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart()
79 static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_sched()
110 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_next_ctx()
126 static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_ctx()
162 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_dispatch_requests()
314 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); in __blk_mq_sched_bio_merge()
346 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, in blk_mq_sched_bypass_insert()
[all …]
blk-mq-debugfs.c
228 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show()
255 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show()
385 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_start()
393 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_next()
401 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_stop()
415 struct blk_mq_hw_ctx *hctx;
434 struct blk_mq_hw_ctx *hctx = data; in hctx_busy_show()
445 struct blk_mq_hw_ctx *hctx = data; in hctx_ctx_map_show()
470 struct blk_mq_hw_ctx *hctx = data; in hctx_tags_show()
487 struct blk_mq_hw_ctx *hctx = data; in hctx_tags_bitmap_show()
[all …]
blk-mq.c
64 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending()
74 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending()
81 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending()
92 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight()
118 static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight_rw()
228 struct blk_mq_hw_ctx *hctx; in blk_mq_quiesce_queue()
263 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters()
271 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_can_queue()
475 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); in __blk_mq_free_request()
491 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); in blk_mq_free_request()
[all …]
blk-mq-sched.h
9 void (*exit)(struct blk_mq_hw_ctx *));
18 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
26 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
78 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work()
88 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart()
blk-mq-tag.c
30 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy()
53 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle()
69 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, in hctx_may_queue()
200 void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags, in blk_mq_put_tag()
215 struct blk_mq_hw_ctx *hctx;
224 struct blk_mq_hw_ctx *hctx = iter_data->hctx; in bt_iter()
242 static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt, in bt_for_each()
320 struct blk_mq_hw_ctx *hctx; in blk_mq_queue_tag_busy_iter()
403 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, in blk_mq_tag_update_depth()
471 struct blk_mq_hw_ctx *hctx; in blk_mq_unique_tag()
kyber-iosched.c
406 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx()
461 static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_exit_hctx()
509 static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) in kyber_bio_merge()
531 static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx, in kyber_insert_requests()
635 struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private); in kyber_domain_wake()
644 struct blk_mq_hw_ctx *hctx) in kyber_get_domain_token()
692 struct blk_mq_hw_ctx *hctx) in kyber_dispatch_cur_domain()
733 static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx) in kyber_dispatch_request()
779 static bool kyber_has_work(struct blk_mq_hw_ctx *hctx) in kyber_has_work()
843 struct blk_mq_hw_ctx *hctx = m->private; \
[all …]
mq-deadline.c
380 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) in dd_dispatch_request()
461 static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) in dd_bio_merge()
481 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in dd_insert_request()
521 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, in dd_insert_requests()
567 static bool dd_has_work(struct blk_mq_hw_ctx *hctx) in dd_has_work()
blk-flush.c
231 struct blk_mq_hw_ctx *hctx; in flush_end_io()
338 struct blk_mq_hw_ctx *hctx; in blk_kick_flush()
407 struct blk_mq_hw_ctx *hctx; in mq_flush_data_end_io()
bfq-iosched.c
1831 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) in bfq_bio_merge()
3834 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) in bfq_has_work()
3846 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in __bfq_dispatch_request()
3985 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in bfq_dispatch_request()
4566 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in bfq_insert_request()
4621 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, in bfq_insert_requests()
5226 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) in bfq_init_hctx()
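The kyber-iosched.c, mq-deadline.c and bfq-iosched.c hits above all implement the same family of per-hardware-queue I/O scheduler hooks (bio_merge, insert_requests, dispatch_request, has_work, per-hctx init/exit). For orientation only, a minimal sketch of such a hook set wired into a v4.19 elevator_type; every demo_* name is invented for illustration and is not part of the indexed tree:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>

/* Each hook is handed the blk_mq_hw_ctx it is being asked about. */
static bool demo_has_work(struct blk_mq_hw_ctx *hctx)
{
        return false;                   /* no pending work in this stub */
}

static struct request *demo_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        return NULL;                    /* nothing to hand back to blk-mq */
}

static struct elevator_type demo_sched = {
        .ops.mq = {
                .dispatch_request       = demo_dispatch_request,
                .has_work               = demo_has_work,
        },
        .uses_mq        = true,
        .elevator_name  = "demo-sched",
        .elevator_owner = THIS_MODULE,
};

A real scheduler would additionally register itself with elv_register(&demo_sched) and fill in the remaining hooks seen in the hits above (insert_requests, bio_merge, per-hctx init/exit).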
/Linux-v4.19/include/linux/
blk-mq.h
15 struct blk_mq_hw_ctx { struct
100 typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
102 typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
103 typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
105 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
106 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
112 typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
115 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
215 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
267 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
[all …]
elevator.h
95 struct blk_mq_hw_ctx;
100 int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
101 void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
104 bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
111 void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
112 struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
113 bool (*has_work)(struct blk_mq_hw_ctx *);
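On the driver side, the blk-mq.h typedefs listed above (queue_rq_fn, init_hctx_fn, exit_hctx_fn, ...) describe the callbacks a driver installs for each blk_mq_hw_ctx. A minimal sketch of a hypothetical v4.19 driver filling a blk_mq_ops table with them; the demo_* names are assumptions, not code from the indexed tree:

#include <linux/blk-mq.h>

static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);               /* mark the request in flight */
        /* ... submit rq to the (hypothetical) hardware here ... */
        blk_mq_end_request(rq, BLK_STS_OK);     /* complete immediately in this stub */
        return BLK_STS_OK;
}

static int demo_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int hctx_idx)
{
        hctx->driver_data = data;       /* data is blk_mq_tag_set.driver_data */
        return 0;
}

static void demo_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        hctx->driver_data = NULL;
}

static const struct blk_mq_ops demo_mq_ops = {
        .queue_rq       = demo_queue_rq,
        .init_hctx      = demo_init_hctx,
        .exit_hctx      = demo_exit_hctx,
};

The scm_blk.c, nvme, mmc, mtip32xx, dm-rq and scsi_lib.c results below are the production counterparts of these callbacks (scsi_lib.c additionally implements the get_budget/put_budget pair).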
/Linux-v4.19/drivers/s390/block/
scm_blk.c
282 static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx, in scm_blk_request()
331 static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scm_blk_init_hctx()
345 static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) in scm_blk_exit_hctx()
/Linux-v4.19/drivers/nvme/target/
loop.c
154 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_loop_queue_rq()
235 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_hctx()
247 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_admin_hctx()
/Linux-v4.19/drivers/nvme/host/
fc.c
43 struct blk_mq_hw_ctx *hctx;
1801 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, in __nvme_fc_init_hctx()
1811 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_fc_init_hctx()
1822 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_fc_init_admin_hctx()
2259 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_fc_queue_rq()
2302 nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) in nvme_fc_poll()
rdma.c
302 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_rdma_init_hctx()
314 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_rdma_init_admin_hctx()
1687 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_rdma_queue_rq()
1743 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) in nvme_rdma_poll()
pci.c
383 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_admin_init_hctx()
398 static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in nvme_admin_exit_hctx()
405 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_init_hctx()
806 static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_queue_rq()
980 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) in nvme_poll()
/Linux-v4.19/drivers/mmc/core/
queue.c
241 static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, in mmc_mq_queue_rq()
/Linux-v4.19/drivers/block/mtip32xx/
mtip32xx.c
209 struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; in mtip_cmd_from_tag()
2190 struct blk_mq_hw_ctx *hctx) in mtip_hw_submit_io()
3548 static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) in mtip_submit_request()
3591 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, in mtip_check_unal_depth()
3615 static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx, in mtip_issue_reserved_cmd()
3651 static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx, in mtip_queue_rq()
/Linux-v4.19/drivers/mtd/ubi/
block.c
321 static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, in ubiblock_queue_rq()
/Linux-v4.19/drivers/md/
dm-rq.c
741 static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, in dm_mq_queue_rq()
/Linux-v4.19/drivers/scsi/
scsi_lib.c
2051 static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) in scsi_mq_put_budget()
2060 static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) in scsi_mq_get_budget()
2080 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, in scsi_queue_rq()
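All of the drivers above receive their blk_mq_hw_ctx instances from the blk-mq core when they register a tag set: nr_hw_queues determines how many hardware contexts are created, and driver_data is what later arrives as the second argument of init_hctx. A rough sketch of that setup on v4.19, reusing the hypothetical demo_mq_ops table from the earlier example:

#include <linux/err.h>
#include <linux/numa.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static struct blk_mq_tag_set demo_tag_set;

static struct request_queue *demo_setup_queue(void *drv_data)
{
        struct request_queue *q;
        int ret;

        demo_tag_set.ops          = &demo_mq_ops;
        demo_tag_set.nr_hw_queues = 1;                  /* one blk_mq_hw_ctx */
        demo_tag_set.queue_depth  = 64;                 /* tags per hardware queue */
        demo_tag_set.numa_node    = NUMA_NO_NODE;
        demo_tag_set.cmd_size     = 0;                  /* no per-request driver payload */
        demo_tag_set.flags        = BLK_MQ_F_SHOULD_MERGE;
        demo_tag_set.driver_data  = drv_data;           /* handed to demo_init_hctx() */

        ret = blk_mq_alloc_tag_set(&demo_tag_set);
        if (ret)
                return ERR_PTR(ret);

        q = blk_mq_init_queue(&demo_tag_set);
        if (IS_ERR(q))
                blk_mq_free_tag_set(&demo_tag_set);
        return q;
}

blk_mq_init_queue() is where the core allocates the array of blk_mq_hw_ctx structures that the hooks in these search results operate on.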
