/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

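	/* CPU this software queue serves, and this ctx's index in hctx->ctxs[] */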
	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time, indexed by op_is_sync(): [0] async, [1] sync */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time, same sync/async indexing */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int index);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
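
/*
 * Usage sketch (illustrative, mirrors blk_mq_put_driver_tag() below): a
 * request is mapped back to its hardware queue via its software queue's CPU:
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
 */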

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes the software queues are per-CPU, though they could also be
 * per-node, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctxs are persistent. This does
 * mean that we can't rely on the ctx always matching the currently running
 * CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
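
/*
 * Pairing sketch (illustrative): blk_mq_get_ctx() calls get_cpu(), which
 * disables preemption until the matching blk_mq_put_ctx(), so the section
 * in between must not sleep:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... use ctx, no sleeping ...
 *	blk_mq_put_ctx(ctx);
 */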

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

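/*
 * BLK_MQ_REQ_INTERNAL requests are allocated from the I/O scheduler's tag
 * set (hctx->sched_tags) rather than the driver tags.
 */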
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
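
/*
 * Dispatch-side pairing sketch (illustrative, simplified from the dispatch
 * path): budget is taken before the driver tag and must be released if the
 * tag cannot be obtained:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 *	... dispatch rq via ->queue_rq() ...
 */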

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}
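
/*
 * The two helpers below are no-ops unless the request holds both a driver
 * tag and a scheduler tag: with no scheduler (internal_tag == -1), rq->tag
 * is the request's only tag and is released when the request itself is
 * freed. The _hctx variant avoids a ctx -> hctx lookup when the caller
 * already has the hardware context.
 */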

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = 0;
}

#endif