/Linux-v5.4/block/

  blk-mq-sched.c
      23  	struct blk_mq_hw_ctx *hctx;  in blk_mq_sched_free_hctx_data()  local
      65  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_mark_restart_hctx()
      74  void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_restart()
      88  static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)  in blk_mq_do_dispatch_sched()
     118  static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,  in blk_mq_next_ctx()
     134  static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_do_dispatch_ctx()
     170  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_dispatch_requests()
     308  				 struct blk_mq_hw_ctx *hctx,  in blk_mq_attempt_merge()
     329  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);  in __blk_mq_sched_bio_merge()  local
     360  static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,  in blk_mq_sched_bypass_insert()
     [all …]

  blk-mq.c
      67  static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)  in blk_mq_hctx_has_pending()
      77  static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,  in blk_mq_hctx_mark_pending()
      86  static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,  in blk_mq_hctx_clear_pending()
      99  static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,  in blk_mq_check_inflight()
     125  static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,  in blk_mq_check_inflight_rw()
     236  	struct blk_mq_hw_ctx *hctx;  in blk_mq_quiesce_queue()  local
     271  	struct blk_mq_hw_ctx *hctx;  in blk_mq_wake_waiters()  local
     279  bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)  in blk_mq_can_queue()
     496  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  in __blk_mq_free_request()  local
     514  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  in blk_mq_free_request()  local
     [all …]

  blk-mq-debugfs.c
     221  	struct blk_mq_hw_ctx *hctx = data;  in hctx_state_show()  local
     247  	struct blk_mq_hw_ctx *hctx = data;  in hctx_flags_show()  local
     360  	struct blk_mq_hw_ctx *hctx = m->private;  in hctx_dispatch_start()  local
     368  	struct blk_mq_hw_ctx *hctx = m->private;  in hctx_dispatch_next()  local
     376  	struct blk_mq_hw_ctx *hctx = m->private;  in hctx_dispatch_stop()  local
     390  	struct blk_mq_hw_ctx	*hctx;  member
     411  	struct blk_mq_hw_ctx *hctx = data;  in hctx_busy_show()  local
     428  	struct blk_mq_hw_ctx *hctx = data;  in hctx_type_show()  local
     437  	struct blk_mq_hw_ctx *hctx = data;  in hctx_ctx_map_show()  local
     462  	struct blk_mq_hw_ctx *hctx = data;  in hctx_tags_show()  local
     [all …]

  blk-mq-sysfs.c
      36  	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,  in blk_mq_hw_sysfs_release()  local
     112  	struct blk_mq_hw_ctx *hctx;  in blk_mq_hw_sysfs_show()  local
     136  	struct blk_mq_hw_ctx *hctx;  in blk_mq_hw_sysfs_store()  local
     155  static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,  in blk_mq_hw_sysfs_nr_tags_show()
     161  static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,  in blk_mq_hw_sysfs_nr_reserved_tags_show()
     167  static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)  in blk_mq_hw_sysfs_cpus_show()
     232  static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_unregister_hctx()
     246  static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_register_hctx()
     270  	struct blk_mq_hw_ctx *hctx;  in blk_mq_unregister_dev()  local
     285  void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)  in blk_mq_hctx_kobj_init()
     [all …]

  blk-mq-tag.c
      32  bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)  in __blk_mq_tag_busy()
      55  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)  in __blk_mq_tag_idle()
      71  static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,  in hctx_may_queue()
     194  void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,  in blk_mq_put_tag()
     209  	struct blk_mq_hw_ctx *hctx;  member
     218  	struct blk_mq_hw_ctx *hctx = iter_data->hctx;  in bt_iter()  local
     250  static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,  in bt_for_each()
     406  	struct blk_mq_hw_ctx *hctx;  in blk_mq_queue_tag_busy_iter()  local
     491  int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,  in blk_mq_tag_update_depth()

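The iterators indexed above (bt_iter(), bt_for_each(), blk_mq_queue_tag_busy_iter()) are private to blk-mq-tag.c; drivers normally reach the same machinery through blk_mq_tagset_busy_iter() from include/linux/blk-mq.h. A minimal sketch, assuming a hypothetical count_busy() helper, of walking the requests allocated from a tag set:

```c
#include <linux/blk-mq.h>

/* Called once per allocated tag; count requests already handed to the driver. */
static bool count_one(struct request *rq, void *data, bool reserved)
{
	unsigned int *count = data;

	if (blk_mq_request_started(rq))
		(*count)++;
	return true;			/* true == keep iterating */
}

/* Hypothetical helper: count in-flight requests across the whole tag set. */
static unsigned int count_busy(struct blk_mq_tag_set *set)
{
	unsigned int count = 0;

	blk_mq_tagset_busy_iter(set, count_one, &count);
	return count;
}
```
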
  blk-mq-tag.h
      40  						 struct blk_mq_hw_ctx *hctx)  in bt_wait_ptr()
      56  static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)  in blk_mq_tag_busy()
      64  static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)  in blk_mq_tag_idle()
      78  static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,  in blk_mq_tag_set_rq()

  blk-mq.h
     166  	struct blk_mq_hw_ctx *hctx;  member
     177  static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)  in blk_mq_hctx_stopped()
     182  static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)  in blk_mq_hw_queue_mapped()
     191  static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)  in blk_mq_put_dispatch_budget()
     199  static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)  in blk_mq_get_dispatch_budget()
     208  static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,  in __blk_mq_put_driver_tag()

  blk-mq-debugfs.h
      48  						struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_register_hctx()
      52  static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_unregister_hctx()
      73  						      struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_register_sched_hctx()
      77  static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_unregister_sched_hctx()

  kyber-iosched.c
     461  static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_init_hctx()
     517  static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_exit_hctx()
     565  static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,  in kyber_bio_merge()
     587  static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,  in kyber_insert_requests()
     694  	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);  in kyber_domain_wake()  local
     704  				  struct blk_mq_hw_ctx *hctx)  in kyber_get_domain_token()
     752  			  struct blk_mq_hw_ctx *hctx)  in kyber_dispatch_cur_domain()
     799  static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)  in kyber_dispatch_request()
     845  static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)  in kyber_has_work()
     967  	struct blk_mq_hw_ctx *hctx = data;  in kyber_cur_domain_show()  local
     [all …]

  blk-mq-sched.h
      73  static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_has_work()
      83  static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_needs_restart()

  mq-deadline.c
     381  static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)  in dd_dispatch_request()
     462  static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,  in dd_bio_merge()
     483  static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,  in dd_insert_request()
     523  static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,  in dd_insert_requests()
     578  static bool dd_has_work(struct blk_mq_hw_ctx *hctx)  in dd_has_work()

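kyber-iosched.c and mq-deadline.c above show the per-hctx hooks an I/O scheduler wires into struct elevator_mq_ops: has_work(), dispatch_request() and, for Kyber, init_hctx()/exit_hctx() all receive the hardware context, and per-hctx scheduler state lives in hctx->sched_data (as kyber_init_hctx() does). A minimal sketch of that shape, with the my_hctx_data FIFO invented for illustration:

```c
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical per-hctx scheduler state, kept in hctx->sched_data. */
struct my_hctx_data {
	spinlock_t	 lock;
	struct list_head fifo;		/* requests queued to this hw queue */
};

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct my_hctx_data *d;

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, hctx->numa_node);
	if (!d)
		return -ENOMEM;
	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->fifo);
	hctx->sched_data = d;		/* per-hctx state, as in kyber_init_hctx() */
	return 0;
}

static void my_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	kfree(hctx->sched_data);
}

static bool my_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct my_hctx_data *d = hctx->sched_data;

	return !list_empty_careful(&d->fifo);
}

static struct request *my_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct my_hctx_data *d = hctx->sched_data;
	struct request *rq = NULL;

	spin_lock(&d->lock);
	if (!list_empty(&d->fifo)) {
		rq = list_first_entry(&d->fifo, struct request, queuelist);
		list_del_init(&rq->queuelist);
	}
	spin_unlock(&d->lock);
	return rq;			/* NULL means nothing to dispatch */
}
```
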
  blk-flush.c
     213  	struct blk_mq_hw_ctx *hctx;  in flush_end_io()  local
     329  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  in mq_flush_data_end_io()  local

  bfq-iosched.c
    2209  static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,  in bfq_bio_merge()
    4625  static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)  in bfq_has_work()
    4637  static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)  in __bfq_dispatch_request()
    4777  static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)  in bfq_dispatch_request()
    5478  static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,  in bfq_insert_request()
    5533  static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,  in bfq_insert_requests()
    6344  static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)  in bfq_depth_updated()
    6354  static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)  in bfq_init_hctx()

  bsg-lib.c
     261  static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,  in bsg_queue_rq()

/Linux-v5.4/drivers/s390/block/

  scm_blk.c
     282  static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,  in scm_blk_request()
     331  static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in scm_blk_init_hctx()
     345  static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)  in scm_blk_exit_hctx()

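scm_blk.c shows the common driver-side pattern: a queue_rq handler plus optional init_hctx()/exit_hctx() callbacks registered through struct blk_mq_ops, all keyed off the hardware context. A minimal sketch of that pattern, assuming a hypothetical my_dev device and my_dev_submit() helper:

```c
#include <linux/blk-mq.h>

struct my_dev { void *hw; };			/* hypothetical driver state */

/* Hypothetical: hand one request to the hardware; returns 0 on success. */
static int my_dev_submit(struct my_dev *dev, struct request *rq)
{
	return 0;
}

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			unsigned int hctx_idx)
{
	hctx->driver_data = data;	/* data is the tag_set's driver_data */
	return 0;
}

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->driver_data;
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	if (my_dev_submit(dev, rq))
		return BLK_STS_IOERR;	/* ask the block layer to fail it */
	return BLK_STS_OK;		/* completion is reported later */
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.init_hctx	= my_init_hctx,
};
```
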
/Linux-v5.4/drivers/nvme/target/

  loop.c
     132  static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,  in nvme_loop_queue_rq()
     215  static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_loop_init_hctx()
     227  static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_loop_init_admin_hctx()

/Linux-v5.4/include/linux/

  blk-mq.h
     359  #define queue_for_each_hw_ctx(q, hctx, i)				\  argument
     363  #define hctx_for_each_ctx(hctx, ctx, i)					\  argument
     367  static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,  in request_to_qc_t()

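queue_for_each_hw_ctx() and hctx_for_each_ctx() are the iteration macros behind many of the call sites indexed above. A minimal sketch, assuming a hypothetical dump_hw_queues() debug helper, of walking every hardware context of a request queue:

```c
#include <linux/blk-mq.h>
#include <linux/printk.h>

/* Hypothetical debug helper: report the state of each hardware queue. */
static void dump_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		pr_info("hctx %u: %u sw ctxs, %s\n",
			hctx->queue_num, hctx->nr_ctx,
			test_bit(BLK_MQ_S_STOPPED, &hctx->state) ?
				"stopped" : "running");
}
```
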
/Linux-v5.4/drivers/block/

  z2ram.c
      69  static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,  in z2_queue_rq()

/Linux-v5.4/net/dccp/ccids/

  ccid3.h
     104  	struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);  in ccid3_hc_tx_sk()  local

/Linux-v5.4/drivers/scsi/

  scsi_lib.c
    1623  static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)  in scsi_mq_put_budget()
    1631  static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)  in scsi_mq_get_budget()
    1644  static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,  in scsi_queue_rq()
    1843  static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)  in scsi_commit_rqs()

/Linux-v5.4/drivers/mtd/

  mtd_blkdevs.c
     185  static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,  in mtd_queue_rq()

/Linux-v5.4/drivers/nvme/host/

  pci.c
     372  static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_admin_init_hctx()
     387  static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in nvme_admin_exit_hctx()
     394  static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_init_hctx()
     501  static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)  in nvme_commit_rqs()
     875  static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,  in nvme_queue_rq()
    1082  static int nvme_poll(struct blk_mq_hw_ctx *hctx)  in nvme_poll()

  rdma.c
     306  static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_rdma_init_hctx()
     318  static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_rdma_init_admin_hctx()
    1730  static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,  in nvme_rdma_queue_rq()
    1800  static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)  in nvme_rdma_poll()

/Linux-v5.4/drivers/block/mtip32xx/

  mtip32xx.c
     165  	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];  in mtip_cmd_from_tag()  local
    2058  			      struct blk_mq_hw_ctx *hctx)  in mtip_hw_submit_io()
    3422  static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,  in mtip_check_unal_depth()
    3446  static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,  in mtip_issue_reserved_cmd()
    3483  static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,  in mtip_queue_rq()

/Linux-v5.4/arch/um/drivers/

  ubd_kern.c
    1312  static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,  in ubd_queue_one_vec()
    1359  static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req)  in queue_rw_req()
    1375  static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,  in ubd_queue_rq()