
Search results for refs:blk_mq_hw_ctx (results 1 – 25 of 65), sorted by relevance


/Linux-v5.10/block/
blk-mq.h 26 struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];
43 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
47 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
48 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
68 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
72 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
77 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
91 static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, in blk_mq_map_queue_type()
104 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, in blk_mq_map_queue()
129 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
[all …]
blk-mq-debugfs.h 24 struct blk_mq_hw_ctx *hctx);
25 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
32 struct blk_mq_hw_ctx *hctx);
33 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
48 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx()
52 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx()
73 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx()
77 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx()
blk-mq-sysfs.c 36 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release()
56 ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
57 ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
106 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show()
111 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_show()
128 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_store()
133 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_store()
145 static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, in blk_mq_hw_sysfs_nr_tags_show()
151 static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, in blk_mq_hw_sysfs_nr_reserved_tags_show()
157 static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) in blk_mq_hw_sysfs_cpus_show()
[all …]
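The blk-mq-sysfs.c matches above all rely on the same idiom: the kobject registered with sysfs is the one embedded in struct blk_mq_hw_ctx, so every attribute callback recovers its hctx with container_of(). A simplified sketch of that step follows; example_hw_sysfs_show is a hypothetical name, and the real code additionally routes through a custom sysfs_ops and the show(struct blk_mq_hw_ctx *, char *) entries visible in the listing.

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/blk-mq.h>

static ssize_t example_hw_sysfs_show(struct kobject *kobj, struct attribute *attr,
				     char *page)
{
	/* sysfs hands back &hctx->kobj, so walk back to the containing hctx. */
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);

	/* queue_num identifies which hardware queue this kobject represents. */
	return sprintf(page, "%u\n", hctx->queue_num);
}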
blk-mq-sched.c 51 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx()
60 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart()
88 struct blk_mq_hw_ctx *hctx = in blk_mq_dispatch_hctx_list()
117 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in __blk_mq_do_dispatch_sched()
194 static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_sched()
205 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_next_ctx()
224 static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) in blk_mq_do_dispatch_ctx()
274 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in __blk_mq_sched_dispatch_requests()
327 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_dispatch_requests()
352 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); in __blk_mq_sched_bio_merge()
[all …]
blk-mq-tag.h 37 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
50 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr()
63 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
64 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
66 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy()
74 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_idle()
blk-mq-debugfs.c 226 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show()
253 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show()
365 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_start()
373 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_next()
381 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_stop()
395 struct blk_mq_hw_ctx *hctx;
415 struct blk_mq_hw_ctx *hctx = data; in hctx_busy_show()
432 struct blk_mq_hw_ctx *hctx = data; in hctx_type_show()
441 struct blk_mq_hw_ctx *hctx = data; in hctx_ctx_map_show()
466 struct blk_mq_hw_ctx *hctx = data; in hctx_tags_show()
[all …]
blk-mq.c 70 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending()
80 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending()
89 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending()
102 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight()
223 struct blk_mq_hw_ctx *hctx; in blk_mq_quiesce_queue()
258 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters()
490 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request()
509 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request()
702 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) in hctx_unlock()
711 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) in hctx_lock()
[all …]
blk-mq-sched.h 16 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
17 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
21 void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
25 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
70 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work()
80 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart()
kyber-iosched.c 461 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx()
517 static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_exit_hctx()
565 static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, in kyber_bio_merge()
587 static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx, in kyber_insert_requests()
694 struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private); in kyber_domain_wake()
704 struct blk_mq_hw_ctx *hctx) in kyber_get_domain_token()
752 struct blk_mq_hw_ctx *hctx) in kyber_dispatch_cur_domain()
799 static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx) in kyber_dispatch_request()
845 static bool kyber_has_work(struct blk_mq_hw_ctx *hctx) in kyber_has_work()
909 struct blk_mq_hw_ctx *hctx = m->private; \
[all …]
blk-mq-tag.c 24 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy()
56 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle()
196 struct blk_mq_hw_ctx *hctx;
205 struct blk_mq_hw_ctx *hctx = iter_data->hctx; in bt_iter()
237 static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt, in bt_for_each()
413 struct blk_mq_hw_ctx *hctx; in blk_mq_queue_tag_busy_iter()
539 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, in blk_mq_tag_update_depth()
mq-deadline.c 381 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) in dd_dispatch_request()
464 static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, in dd_bio_merge()
485 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in dd_insert_request()
525 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, in dd_insert_requests()
581 static bool dd_has_work(struct blk_mq_hw_ctx *hctx) in dd_has_work()
blk-flush.c 341 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io()
bfq-iosched.c 2213 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, in bfq_bio_merge()
4639 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) in bfq_has_work()
4654 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in __bfq_dispatch_request()
4794 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) in bfq_dispatch_request()
5492 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in bfq_insert_request()
5551 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, in bfq_insert_requests()
6362 static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) in bfq_depth_updated()
6372 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) in bfq_init_hctx()
bsg-lib.c 264 static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx, in bsg_queue_rq()
/Linux-v5.10/include/linux/
blk-mq.h 16 struct blk_mq_hw_ctx { struct
282 typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
294 blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
304 void (*commit_rqs)(struct blk_mq_hw_ctx *);
327 int (*poll)(struct blk_mq_hw_ctx *);
339 int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
343 void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
506 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
507 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
510 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
[all …]
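The include/linux/blk-mq.h hits above cover the driver-facing side of the hardware context: the struct blk_mq_hw_ctx definition plus the blk_mq_ops callbacks (queue_rq, commit_rqs, poll, init_hctx, exit_hctx) that are all handed a hctx. Below is a minimal sketch of a v5.10-style queue_rq; example_queue_rq, example_handle_cmd and example_mq_ops are hypothetical names, and the synchronous completion is a simplification (real drivers normally complete requests from an interrupt or polling path).

#include <linux/blk-mq.h>

/* Placeholder for real hardware submission; always succeeds in this sketch. */
static blk_status_t example_handle_cmd(struct request *rq)
{
	return BLK_STS_OK;
}

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	blk_status_t ret;

	blk_mq_start_request(rq);	/* mark the request as in flight */

	ret = example_handle_cmd(rq);
	if (ret == BLK_STS_RESOURCE) {
		/*
		 * Out of device resources: stop this hardware queue and let
		 * blk-mq retry later. Something must restart the queue once
		 * resources free up, e.g. blk_mq_start_stopped_hw_queue().
		 */
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_RESOURCE;
	}

	blk_mq_end_request(rq, ret);	/* sketch only: complete synchronously */
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};

The ops table would normally be plugged into a struct blk_mq_tag_set before calling blk_mq_alloc_tag_set() and blk_mq_init_queue(); those setup steps are omitted here.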
elevator.h 27 struct blk_mq_hw_ctx;
32 int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
33 void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
34 void (*depth_updated)(struct blk_mq_hw_ctx *);
37 bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
44 void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
45 struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
46 bool (*has_work)(struct blk_mq_hw_ctx *);
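The elevator.h hits show the scheduler side of the same interface: the elevator_mq_ops hooks that deal with per-queue dispatch (init_hctx, exit_hctx, depth_updated, bio_merge, insert_requests, dispatch_request, has_work) all take a struct blk_mq_hw_ctx. The fragment below is a non-functional sketch that only illustrates which hooks receive a hctx; the example_* names are hypothetical, and a real scheduler (compare kyber-iosched.c or mq-deadline.c above) would also provide init_sched/exit_sched, keep its own per-hctx state, and register the type with elv_register().

#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>

static bool example_has_work(struct blk_mq_hw_ctx *hctx)
{
	return false;			/* this sketch never queues anything */
}

static struct request *example_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	return NULL;			/* nothing to hand back to blk-mq */
}

static void example_insert_requests(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list, bool at_head)
{
	/* A real scheduler would splice @list onto its own queues here. */
}

static struct elevator_type example_sched = {
	.ops = {
		.insert_requests	= example_insert_requests,
		.dispatch_request	= example_dispatch_request,
		.has_work		= example_has_work,
	},
	.elevator_name	= "example-noop",
	.elevator_owner	= THIS_MODULE,
};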
/Linux-v5.10/drivers/block/rnbd/
rnbd-clt.h 99 struct blk_mq_hw_ctx *hctx;
rnbd-clt.c 1101 struct blk_mq_hw_ctx *hctx, in rnbd_clt_dev_kick_mq_queue()
1116 static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx, in rnbd_queue_rq()
1243 struct blk_mq_hw_ctx *hctx) in rnbd_init_hw_queue()
1253 struct blk_mq_hw_ctx *hctx; in rnbd_init_mq_hw_queues()
/Linux-v5.10/drivers/s390/block/
scm_blk.c 283 static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx, in scm_blk_request()
332 static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scm_blk_init_hctx()
346 static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) in scm_blk_exit_hctx()
/Linux-v5.10/drivers/nvme/target/
loop.c 131 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_loop_queue_rq()
214 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_hctx()
226 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_admin_hctx()
/Linux-v5.10/arch/um/drivers/
ubd_kern.c 196 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
1313 static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, in ubd_queue_one_vec()
1360 static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req) in queue_rw_req()
1376 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, in ubd_queue_rq()
/Linux-v5.10/drivers/block/
z2ram.c 69 static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx, in z2_queue_rq()
/Linux-v5.10/drivers/block/paride/
pcd.c 189 static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
816 static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx, in pcd_queue_rq()
pf.c 209 static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
872 static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx, in pf_queue_rq()
/Linux-v5.10/drivers/nvme/host/
tcp.c 425 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_tcp_init_hctx()
435 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_tcp_init_admin_hctx()
2286 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx) in nvme_tcp_commit_rqs()
2294 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_tcp_queue_rq()
2363 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx) in nvme_tcp_poll()
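Across the driver hits above (rnbd, scm_blk, the nvme loop and tcp transports, ubd, and the simpler paride and z2ram drivers) the recurring pattern is that init_hctx receives the tag set's driver_data plus a hardware-queue index, looks up the driver's matching per-queue structure, and caches it in hctx->driver_data, where queue_rq and poll can reach it later. A hedged sketch of that pattern; example_ctrl and example_queue are hypothetical driver types.

#include <linux/blk-mq.h>

/* Hypothetical driver state; real drivers define their own controller/queue types. */
struct example_queue {
	int qid;			/* hardware queue index */
};

struct example_ctrl {
	struct example_queue *queues;	/* one entry per hardware queue */
};

static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int hctx_idx)
{
	struct example_ctrl *ctrl = data;	/* blk_mq_tag_set.driver_data */

	/* Remember which driver queue backs this hardware context. */
	hctx->driver_data = &ctrl->queues[hctx_idx];
	return 0;
}

queue_rq would then start with struct example_queue *q = hctx->driver_data; the nvme_tcp_init_hctx hit above follows essentially this shape.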
