#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

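/*
 * The mq directory kobject and the per-cpu software context kobjects are
 * embedded in structures whose memory is managed elsewhere, so their
 * release callback has nothing to free.
 */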
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

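/*
 * Final reference drop on a hardware context kobject: release the per-hctx
 * resources and the hardware context itself.
 */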
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

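/*
 * Attribute wrappers carrying typed show/store callbacks, one variant for
 * software contexts (blk_mq_ctx) and one for hardware contexts
 * (blk_mq_hw_ctx).
 */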
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

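/*
 * Generic show/store dispatchers: resolve the attribute entry and the owning
 * context, then invoke the typed callback under q->sysfs_lock. Attributes
 * without a callback return -EIO, and access to a dying queue returns
 * -ENOENT.
 */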
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

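/* Per-hctx attributes: tag counts and the CPUs mapped to this hardware queue. */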
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}

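/* Software contexts currently expose no attributes of their own. */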
static struct attribute *default_ctx_attrs[] = {
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

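/*
 * kobject types for the mq directory, the per-cpu software context
 * directories and the per-hctx directories.
 */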
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hw_sysfs_release,
};

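/*
 * Remove a hardware context and its per-cpu children from sysfs. A hctx
 * with no mapped software contexts was never registered, so there is
 * nothing to undo for it.
 */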
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

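/*
 * Tear down the entire mq/ hierarchy for a queue and drop the device
 * reference taken at registration time. Caller must hold q->sysfs_lock.
 */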
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

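/* Set up the kobject embedded in a freshly allocated hardware context. */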
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

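/*
 * Drop the references taken by blk_mq_sysfs_init() on the mq and per-cpu
 * context kobjects.
 */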
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(&q->mq_kobj);
}

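/*
 * Initialize the mq kobject and one kobject per possible CPU's software
 * context; they are added to sysfs later, when the device is registered.
 */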
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

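/*
 * Register the mq/ directory and all hardware contexts of a queue with
 * sysfs. On failure, hardware contexts registered so far are rolled back.
 * Caller must hold q->sysfs_lock.
 */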
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

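/* Locked wrapper around __blk_mq_register_dev(). */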
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

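/*
 * blk_mq_sysfs_unregister() and blk_mq_sysfs_register() drop and re-add the
 * hardware context directories, e.g. across a remap of software to hardware
 * contexts. Both are no-ops until the initial device registration has
 * completed.
 */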
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_lock);

	return ret;
}