/Linux-v6.1/block/
blk-mq-sched.c:
    25  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_mark_restart_hctx() argument
    27  if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))  in blk_mq_sched_mark_restart_hctx()
    30  set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);  in blk_mq_sched_mark_restart_hctx()
    34  void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)  in __blk_mq_sched_restart() argument
    36  clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);  in __blk_mq_sched_restart()
    47  blk_mq_run_hw_queue(hctx, true);  in __blk_mq_sched_restart()
    61  struct blk_mq_hw_ctx *hctx =  in blk_mq_dispatch_hctx_list() local
    68  if (rq->mq_hctx != hctx) {  in blk_mq_dispatch_hctx_list()
    77  return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);  in blk_mq_dispatch_hctx_list()
    90  static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)  in __blk_mq_do_dispatch_sched() argument
    [all …]

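The mark/restart pair above is a test-before-atomic-write idiom: blk_mq_sched_mark_restart_hctx() only pays for the atomic set_bit() when the SCHED_RESTART bit is not already set, so repeatedly marking a busy queue does not keep dirtying the cacheline, while __blk_mq_sched_restart() clears the bit before re-running the queue. Below is a minimal, runnable userspace analogue of that shape using C11 atomics; the names and the memory-ordering mapping are illustrative, not the kernel's bitops.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define QUEUE_RESTART_BIT (1u << 0)

    static _Atomic unsigned int queue_state;

    /* Like blk_mq_sched_mark_restart_hctx(): skip the atomic RMW when the
     * flag is already set, so hot paths don't keep writing the cacheline. */
    static void mark_restart(void)
    {
            if (atomic_load_explicit(&queue_state, memory_order_relaxed) &
                QUEUE_RESTART_BIT)
                    return;
            atomic_fetch_or(&queue_state, QUEUE_RESTART_BIT);
    }

    /* Like the blk_mq_sched_restart()/__blk_mq_sched_restart() pair: clear
     * the flag and report whether a restart was actually pending. */
    static bool clear_restart(void)
    {
            unsigned int old = atomic_fetch_and(&queue_state,
                                                ~QUEUE_RESTART_BIT);
            return old & QUEUE_RESTART_BIT;
    }

    int main(void)
    {
            mark_restart();
            mark_restart();   /* second call takes the cheap early return */
            printf("restart was pending: %d\n", clear_restart());
            return 0;
    }
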
blk-mq-sysfs.c:
    36  struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,  in blk_mq_hw_sysfs_release() local
    39  blk_free_flush_queue(hctx->fq);  in blk_mq_hw_sysfs_release()
    40  sbitmap_free(&hctx->ctx_map);  in blk_mq_hw_sysfs_release()
    41  free_cpumask_var(hctx->cpumask);  in blk_mq_hw_sysfs_release()
    42  kfree(hctx->ctxs);  in blk_mq_hw_sysfs_release()
    43  kfree(hctx);  in blk_mq_hw_sysfs_release()
    56  struct blk_mq_hw_ctx *hctx;  in blk_mq_hw_sysfs_show() local
    61  hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);  in blk_mq_hw_sysfs_show()
    62  q = hctx->queue;  in blk_mq_hw_sysfs_show()
    68  res = entry->show(hctx, page);  in blk_mq_hw_sysfs_show()
    [all …]

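blk_mq_hw_sysfs_release() is the standard kobject release pattern: the callback receives only the embedded kobject and uses container_of() to recover the enclosing blk_mq_hw_ctx before freeing its members. A self-contained userspace sketch of the same trick follows; the demo structs are hypothetical stand-ins, only the container_of() arithmetic matches the kernel macro.

    #include <stddef.h>
    #include <stdio.h>

    /* Same arithmetic as the kernel macro: member pointer minus member
     * offset yields the address of the enclosing object. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobj_demo { int refcount; };     /* stand-in for struct kobject */

    struct hw_ctx_demo {                    /* stand-in for blk_mq_hw_ctx */
            int queue_num;
            struct kobj_demo kobj;          /* embedded, like hctx->kobj */
    };

    /* The release callback only ever sees the embedded member. */
    static void demo_release(struct kobj_demo *kobj)
    {
            struct hw_ctx_demo *hctx =
                    container_of(kobj, struct hw_ctx_demo, kobj);
            printf("releasing hctx %d\n", hctx->queue_num);
    }

    int main(void)
    {
            struct hw_ctx_demo h = { .queue_num = 3 };
            demo_release(&h.kobj);
            return 0;
    }
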
blk-mq.h:
    39  bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
    43  void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
    44  struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
    62  void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
    66  void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
    68  void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
   125  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
   164  struct blk_mq_hw_ctx *hctx;  member
   175  return data->hctx->tags;  in blk_mq_tags_from_data()
   176  return data->hctx->sched_tags;  in blk_mq_tags_from_data()
    [all …]

blk-mq.c:
    79  static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,  in blk_qc_to_rq() argument
    85  return blk_mq_tag_to_rq(hctx->sched_tags, tag);  in blk_qc_to_rq()
    86  return blk_mq_tag_to_rq(hctx->tags, tag);  in blk_qc_to_rq()
   100  static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)  in blk_mq_hctx_has_pending() argument
   102  return !list_empty_careful(&hctx->dispatch) ||  in blk_mq_hctx_has_pending()
   103  sbitmap_any_bit_set(&hctx->ctx_map) ||  in blk_mq_hctx_has_pending()
   104  blk_mq_sched_has_work(hctx);  in blk_mq_hctx_has_pending()
   110  static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,  in blk_mq_hctx_mark_pending() argument
   113  const int bit = ctx->index_hw[hctx->type];  in blk_mq_hctx_mark_pending()
   115  if (!sbitmap_test_bit(&hctx->ctx_map, bit))  in blk_mq_hctx_mark_pending()
    [all …]

blk-mq-debugfs.c:
   202  struct blk_mq_hw_ctx *hctx = data;  in hctx_state_show() local
   204  blk_flags_show(m, hctx->state, hctx_state_name,  in hctx_state_show()
   230  struct blk_mq_hw_ctx *hctx = data;  in hctx_flags_show() local
   231  const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);  in hctx_flags_show()
   241  hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),  in hctx_flags_show()
   340  __acquires(&hctx->lock)  in hctx_dispatch_start()
   342  struct blk_mq_hw_ctx *hctx = m->private;  in hctx_dispatch_start() local
   344  spin_lock(&hctx->lock);  in hctx_dispatch_start()
   345  return seq_list_start(&hctx->dispatch, *pos);  in hctx_dispatch_start()
   350  struct blk_mq_hw_ctx *hctx = m->private;  in hctx_dispatch_next() local
    [all …]

blk-mq-tag.c:
    40  void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)  in __blk_mq_tag_busy() argument
    44  if (blk_mq_is_shared_tags(hctx->flags)) {  in __blk_mq_tag_busy()
    45  struct request_queue *q = hctx->queue;  in __blk_mq_tag_busy()
    51  if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))  in __blk_mq_tag_busy()
    53  set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);  in __blk_mq_tag_busy()
    56  users = atomic_inc_return(&hctx->tags->active_queues);  in __blk_mq_tag_busy()
    58  blk_mq_update_wake_batch(hctx->tags, users);  in __blk_mq_tag_busy()
    75  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)  in __blk_mq_tag_idle() argument
    77  struct blk_mq_tags *tags = hctx->tags;  in __blk_mq_tag_idle()
    80  if (blk_mq_is_shared_tags(hctx->flags)) {  in __blk_mq_tag_idle()
    [all …]

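__blk_mq_tag_busy() is idempotent per hardware queue: the BLK_MQ_S_TAG_ACTIVE test/set pair ensures each queue is counted once in active_queues, and the resulting user count feeds blk_mq_update_wake_batch() so wake batches shrink as more queues contend for one shared tag set. Here is a rough, runnable userspace model of that accounting; the atomic_exchange() and the depth/users batch formula are deliberate simplifications, not the kernel's sbitmap heuristic.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct tag_set_demo {
            unsigned int depth;
            _Atomic unsigned int active_queues;
            _Atomic unsigned int wake_batch;
    };

    struct queue_demo {
            _Atomic bool tag_active;        /* plays BLK_MQ_S_TAG_ACTIVE */
            struct tag_set_demo *tags;
    };

    static void tag_busy(struct queue_demo *q)
    {
            unsigned int users, batch;

            /* Idempotent per queue: only the first call is counted. */
            if (atomic_exchange(&q->tag_active, true))
                    return;

            users = atomic_fetch_add(&q->tags->active_queues, 1) + 1;
            batch = q->tags->depth / users;     /* stand-in heuristic */
            atomic_store(&q->tags->wake_batch, batch ? batch : 1);
    }

    int main(void)
    {
            struct tag_set_demo tags = { .depth = 64 };
            struct queue_demo q0 = { .tags = &tags }, q1 = { .tags = &tags };

            tag_busy(&q0);
            tag_busy(&q0);                      /* no double count */
            tag_busy(&q1);
            printf("active=%u wake_batch=%u\n",
                   atomic_load(&tags.active_queues),
                   atomic_load(&tags.wake_batch));
            return 0;
    }
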
blk-mq-sched.h:
    17  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
    18  void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
    22  void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
    26  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
    32  static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_restart() argument
    34  if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))  in blk_mq_sched_restart()
    35  __blk_mq_sched_restart(hctx);  in blk_mq_sched_restart()
    77  static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)  in blk_mq_sched_has_work() argument
    79  struct elevator_queue *e = hctx->queue->elevator;  in blk_mq_sched_has_work()
    82  return e->type->ops.has_work(hctx);  in blk_mq_sched_has_work()
    [all …]

blk-mq-tag.h:
    23  extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
    37  struct blk_mq_hw_ctx *hctx)  in bt_wait_ptr() argument
    39  if (!hctx)  in bt_wait_ptr()
    41  return sbq_wait_ptr(bt, &hctx->wait_index);  in bt_wait_ptr()
    53  static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)  in blk_mq_tag_busy() argument
    55  if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)  in blk_mq_tag_busy()
    56  __blk_mq_tag_busy(hctx);  in blk_mq_tag_busy()
    59  static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)  in blk_mq_tag_idle() argument
    61  if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))  in blk_mq_tag_idle()
    64  __blk_mq_tag_idle(hctx);  in blk_mq_tag_idle()

kyber-iosched.c:
   455  static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)  in kyber_depth_updated() argument
   457  struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;  in kyber_depth_updated()
   458  struct blk_mq_tags *tags = hctx->sched_tags;  in kyber_depth_updated()
   466  static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_init_hctx() argument
   471  khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);  in kyber_init_hctx()
   475  khd->kcqs = kmalloc_array_node(hctx->nr_ctx,  in kyber_init_hctx()
   477  GFP_KERNEL, hctx->numa_node);  in kyber_init_hctx()
   481  for (i = 0; i < hctx->nr_ctx; i++)  in kyber_init_hctx()
   485  if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,  in kyber_init_hctx()
   486  ilog2(8), GFP_KERNEL, hctx->numa_node,  in kyber_init_hctx()
    [all …]

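kyber_init_hctx() shows the common per-hctx scheduler setup: allocate private state on the hardware queue's NUMA node (hctx->numa_node), size per-software-queue arrays by hctx->nr_ctx, and park the result in the hctx. The following is a hedged kernel-style sketch of that shape, not a compile-tested module; kmalloc_node()/kmalloc_array_node() and hctx->sched_data are real v6.1 interfaces, while struct my_sched_data and the function name are made up.

    #include <linux/blk-mq.h>
    #include <linux/slab.h>

    struct my_sched_data {                  /* hypothetical per-hctx state */
            void **per_ctx;
    };

    static int my_sched_init_hctx(struct blk_mq_hw_ctx *hctx,
                                  unsigned int hctx_idx)
    {
            struct my_sched_data *d;

            /* Keep scheduler state local to the hctx's NUMA node. */
            d = kmalloc_node(sizeof(*d), GFP_KERNEL, hctx->numa_node);
            if (!d)
                    return -ENOMEM;

            /* One slot per software queue mapped to this hardware queue. */
            d->per_ctx = kmalloc_array_node(hctx->nr_ctx, sizeof(void *),
                                            GFP_KERNEL, hctx->numa_node);
            if (!d->per_ctx) {
                    kfree(d);
                    return -ENOMEM;
            }

            hctx->sched_data = d;
            return 0;
    }
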
blk-mq-debugfs.h:
    25  struct blk_mq_hw_ctx *hctx);
    26  void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
    33  struct blk_mq_hw_ctx *hctx);
    34  void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
    44  struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_register_hctx() argument
    48  static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_unregister_hctx() argument
    69  struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_register_sched_hctx() argument
    73  static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)  in blk_mq_debugfs_unregister_sched_hctx() argument

mq-deadline.c:
   514  static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)  in dd_dispatch_request() argument
   516  struct deadline_data *dd = hctx->queue->elevator->elevator_data;  in dd_dispatch_request()
   562  static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)  in dd_depth_updated() argument
   564  struct request_queue *q = hctx->queue;  in dd_depth_updated()
   566  struct blk_mq_tags *tags = hctx->sched_tags;  in dd_depth_updated()
   574  static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in dd_init_hctx() argument
   576  dd_depth_updated(hctx);  in dd_init_hctx()
   712  static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,  in dd_insert_request() argument
   715  struct request_queue *q = hctx->queue;  in dd_insert_request()
   769  static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,  in dd_insert_requests() argument
    [all …]

blk-flush.c:
   362  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  in mq_flush_data_end_io() local
   380  blk_mq_sched_restart(hctx);  in mq_flush_data_end_io()
   528  void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,  in blk_mq_hctx_set_fq_lock_class() argument
   531  lockdep_set_class(&hctx->fq->mq_flush_lock, key);  in blk_mq_hctx_set_fq_lock_class()

bfq-iosched.c:
  5067  static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)  in bfq_has_work() argument
  5069  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;  in bfq_has_work()
  5079  static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)  in __bfq_dispatch_request() argument
  5081  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;  in __bfq_dispatch_request()
  5219  static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)  in bfq_dispatch_request() argument
  5221  struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;  in bfq_dispatch_request()
  5231  rq = __bfq_dispatch_request(hctx);  in bfq_dispatch_request()
  5238  bfq_update_dispatch_stats(hctx->queue, rq,  in bfq_dispatch_request()
  6135  static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,  in bfq_insert_request() argument
  6138  struct request_queue *q = hctx->queue;  in bfq_insert_request()
    [all …]

bsg-lib.c:
   272  static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,  in bsg_queue_rq() argument
   275  struct request_queue *q = hctx->queue;  in bsg_queue_rq()

/Linux-v6.1/net/dccp/ccids/
ccid3.h:
   104  struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);  in ccid3_hc_tx_sk() local
   105  BUG_ON(hctx == NULL);  in ccid3_hc_tx_sk()
   106  return hctx;  in ccid3_hc_tx_sk()
    (Unrelated to blk-mq: in DCCP's CCID-3, hctx is a local naming the TX half-connection socket state.)

/Linux-v6.1/include/linux/
blk-mq.h:
   874  void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
   875  void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
   878  void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
   883  void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
   884  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
   941  #define queue_for_each_hw_ctx(q, hctx, i) \  argument
   942  xa_for_each(&(q)->hctx_table, (i), (hctx))
   944  #define hctx_for_each_ctx(hctx, ctx, i) \  argument
   945  for ((i) = 0; (i) < (hctx)->nr_ctx && \
   946  ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
    [all …]

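hctx_for_each_ctx() at lines 944-946 hides a GNU C statement expression in the loop condition: ({ ctx = (hctx)->ctxs[(i)]; 1; }) performs the per-iteration assignment and always evaluates to 1, so only the nr_ctx bound terminates the loop. A runnable userspace reconstruction is below (build with gcc or clang, since statement expressions are an extension); the demo types are invented.

    #include <stdio.h>

    struct ctx_demo { int index; };

    struct hctx_demo {
            int nr_ctx;
            struct ctx_demo **ctxs;
    };

    /* Same shape as hctx_for_each_ctx(): assign inside the condition,
     * always "true", terminate on the bound alone. */
    #define hctx_for_each_ctx_demo(hctx, ctx, i)                    \
            for ((i) = 0; (i) < (hctx)->nr_ctx &&                   \
                 ({ (ctx) = (hctx)->ctxs[(i)]; 1; }); (i)++)

    int main(void)
    {
            struct ctx_demo a = { 0 }, b = { 1 }, c = { 2 };
            struct ctx_demo *list[] = { &a, &b, &c };
            struct hctx_demo h = { .nr_ctx = 3, .ctxs = list };
            struct ctx_demo *ctx;
            int i;

            hctx_for_each_ctx_demo(&h, ctx, i)
                    printf("ctx[%d] = %d\n", i, ctx->index);
            return 0;
    }
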
/Linux-v6.1/drivers/block/
virtio_blk.c:
   104  static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)  in get_virtio_blk_vq() argument
   106  struct virtio_blk *vblk = hctx->queue->queuedata;  in get_virtio_blk_vq()
   107  struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];  in get_virtio_blk_vq()
   188  static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,  in virtblk_map_data() argument
   204  return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);  in virtblk_map_data()
   304  static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)  in virtio_commit_rqs() argument
   306  struct virtio_blk *vblk = hctx->queue->queuedata;  in virtio_commit_rqs()
   307  struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];  in virtio_commit_rqs()
   318  static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,  in virtblk_prep_rq() argument
   329  vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);  in virtblk_prep_rq()
    [all …]

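virtio_blk, and the rnbd/nvme/scm/scsi drivers below, all implement the same blk_mq_ops contract: queue_rq() receives the hardware context plus a blk_mq_queue_data, starts the request, hands it to the hardware, and reports backpressure through its return value. Here is a hedged skeleton of that contract written against the real v6.1 signatures; struct mydev and mydev_submit() are hypothetical driver internals, not an existing API.

    #include <linux/blk-mq.h>

    struct mydev;                           /* hypothetical driver state */
    bool mydev_submit(struct mydev *dev, struct request *req);

    static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
    {
            struct request *req = bd->rq;
            /* Drivers reach their device either via queue->queuedata
             * (virtio_blk, scm_blk) or hctx->driver_data (rnbd, nvme). */
            struct mydev *dev = hctx->queue->queuedata;

            blk_mq_start_request(req);
            if (!mydev_submit(dev, req))
                    return BLK_STS_RESOURCE;   /* core requeues and retries */
            return BLK_STS_OK;
    }

    static const struct blk_mq_ops my_mq_ops = {
            .queue_rq = my_queue_rq,
    };
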
/Linux-v6.1/drivers/s390/block/
scm_blk.c:
   282  static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,  in scm_blk_request() argument
   285  struct scm_device *scmdev = hctx->queue->queuedata;  in scm_blk_request()
   287  struct scm_queue *sq = hctx->driver_data;  in scm_blk_request()
   331  static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in scm_blk_init_hctx() argument
   340  hctx->driver_data = qd;  in scm_blk_init_hctx()
   345  static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)  in scm_blk_exit_hctx() argument
   347  struct scm_queue *qd = hctx->driver_data;  in scm_blk_exit_hctx()
   350  kfree(hctx->driver_data);  in scm_blk_exit_hctx()
   351  hctx->driver_data = NULL;  in scm_blk_exit_hctx()

/Linux-v6.1/drivers/block/rnbd/
rnbd-clt.c:
   139  if (WARN_ON(!q->hctx))  in rnbd_clt_dev_requeue()
   143  blk_mq_run_hw_queue(q->hctx, true);  in rnbd_clt_dev_requeue()
  1096  struct blk_mq_hw_ctx *hctx,  in rnbd_clt_dev_kick_mq_queue() argument
  1099  struct rnbd_queue *q = hctx->driver_data;  in rnbd_clt_dev_kick_mq_queue()
  1102  blk_mq_delay_run_hw_queue(hctx, delay);  in rnbd_clt_dev_kick_mq_queue()
  1108  blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);  in rnbd_clt_dev_kick_mq_queue()
  1111  static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,  in rnbd_queue_rq() argument
  1126  rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);  in rnbd_queue_rq()
  1140  rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);  in rnbd_queue_rq()
  1150  rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);  in rnbd_queue_rq()
    [all …]

rnbd-clt.h:
   105  struct blk_mq_hw_ctx *hctx;  member

/Linux-v6.1/drivers/nvme/target/
loop.c:
   131  static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,  in nvme_loop_queue_rq() argument
   134  struct nvme_ns *ns = hctx->queue->queuedata;  in nvme_loop_queue_rq()
   135  struct nvme_loop_queue *queue = hctx->driver_data;  in nvme_loop_queue_rq()
   218  static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_loop_init_hctx() argument
   232  blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);  in nvme_loop_init_hctx()
   234  hctx->driver_data = queue;  in nvme_loop_init_hctx()
   238  static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_loop_init_admin_hctx() argument
   246  hctx->driver_data = queue;  in nvme_loop_init_admin_hctx()

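The loop.c and tcp.c entries show the standard init_hctx convention: resolve the per-hardware-queue object once from the tag set's driver data and cache it in hctx->driver_data, so the queue_rq() fast path is a single pointer load. A sketch under invented names (my_ctrl, my_queue) follows; the +1 mirrors how the nvme drivers keep slot 0 for the admin queue, and the callback signature matches the v6.1 blk_mq_ops init_hctx hook.

    #include <linux/blk-mq.h>

    struct my_queue;                        /* hypothetical */
    struct my_ctrl {                        /* hypothetical */
            struct my_queue *queues;
    };

    static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                            unsigned int hctx_idx)
    {
            struct my_ctrl *ctrl = data;    /* from tag_set->driver_data */

            /* +1 skips queues[0], reserved for the admin queue as in nvme. */
            hctx->driver_data = &ctrl->queues[hctx_idx + 1];
            return 0;
    }
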
/Linux-v6.1/drivers/block/null_blk/
main.c:
  1584  static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)  in null_poll() argument
  1586  struct nullb_queue *nq = hctx->driver_data;  in null_poll()
  1614  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  in null_timeout_rq() local
  1619  if (hctx->type == HCTX_TYPE_POLL) {  in null_timeout_rq()
  1620  struct nullb_queue *nq = hctx->driver_data;  in null_timeout_rq()
  1635  if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)  in null_timeout_rq()
  1640  static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,  in null_queue_rq() argument
  1644  struct nullb_queue *nq = hctx->driver_data;  in null_queue_rq()
  1647  const bool is_poll = hctx->type == HCTX_TYPE_POLL;  in null_queue_rq()
  1649  might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);  in null_queue_rq()
    [all …]

/Linux-v6.1/drivers/nvme/host/
apple.c:
   732  static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,  in apple_nvme_queue_rq() argument
   735  struct nvme_ns *ns = hctx->queue->queuedata;  in apple_nvme_queue_rq()
   736  struct apple_nvme_queue *q = hctx->driver_data;  in apple_nvme_queue_rq()
   775  static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in apple_nvme_init_hctx() argument
   778  hctx->driver_data = data;  in apple_nvme_init_hctx()
   922  static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,  in apple_nvme_poll() argument
   925  struct apple_nvme_queue *q = hctx->driver_data;  in apple_nvme_poll()

tcp.c:
   486  static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_tcp_init_hctx() argument
   492  hctx->driver_data = queue;  in nvme_tcp_init_hctx()
   496  static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in nvme_tcp_init_admin_hctx() argument
   502  hctx->driver_data = queue;  in nvme_tcp_init_admin_hctx()
  2392  static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)  in nvme_tcp_commit_rqs() argument
  2394  struct nvme_tcp_queue *queue = hctx->driver_data;  in nvme_tcp_commit_rqs()
  2400  static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,  in nvme_tcp_queue_rq() argument
  2403  struct nvme_ns *ns = hctx->queue->queuedata;  in nvme_tcp_queue_rq()
  2404  struct nvme_tcp_queue *queue = hctx->driver_data;  in nvme_tcp_queue_rq()
  2467  static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)  in nvme_tcp_poll() argument
    [all …]

/Linux-v6.1/drivers/scsi/
scsi_lib.c:
  1711  static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,  in scsi_queue_rq() argument
  1848  static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)  in scsi_mq_poll() argument
  1850  struct Scsi_Host *shost = hctx->driver_data;  in scsi_mq_poll()
  1853  return shost->hostt->mq_poll(shost, hctx->queue_num);  in scsi_mq_poll()
  1858  static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,  in scsi_init_hctx() argument
  1863  hctx->driver_data = shost;  in scsi_init_hctx()
  1934  static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)  in scsi_commit_rqs() argument
  1936  struct Scsi_Host *shost = hctx->driver_data;  in scsi_commit_rqs()
  1938  shost->hostt->commit_rqs(shost, hctx->queue_num);  in scsi_commit_rqs()