/*
 * cgroups support for the BFQ I/O scheduler.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

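/*
 * For each flag above, BFQG_FLAG_FNS() below expands to three helpers,
 * e.g., bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting(), which respectively set, clear and test the
 * corresponding bit in stats->flags.
 */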
#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)	\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq
	 * got a new request in the parent group and moved to this group
	 * while being added to the service tree. Just ignore the event
	 * and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

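/*
 * Account completion of a request. start_time_ns is assumed to be the
 * instant the request entered the scheduler and io_start_time_ns the
 * instant it was dispatched to the device, so that service_time covers
 * device service and wait_time covers queueing delay.
 */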
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

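/*
 * Note: these helpers update bfqg->ref with plain, non-atomic
 * operations; callers are assumed to serialize through the scheduler
 * or blk-cgroup locks, as the locking comments elsewhere in this file
 * suggest.
 */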
static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	blkg_stat_exit(&stats->time);
	blkg_stat_exit(&stats->avg_queue_size_sum);
	blkg_stat_exit(&stats->avg_queue_size_samples);
	blkg_stat_exit(&stats->dequeue);
	blkg_stat_exit(&stats->group_wait_time);
	blkg_stat_exit(&stats->idle_time);
	blkg_stat_exit(&stats->empty_time);
#endif
}

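/*
 * Initialize all stat counters, or fail as a whole. On any failure,
 * bfqg_stats_exit() tears everything down; since the containing bfqg
 * is zero-allocated, exiting counters that were never initialized is
 * assumed to be safe here.
 */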
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    blkg_stat_init(&stats->time, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    blkg_stat_init(&stats->dequeue, gfp) ||
	    blkg_stat_init(&stats->group_wait_time, gfp) ||
	    blkg_stat_init(&stats->idle_time, gfp) ||
	    blkg_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

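/*
 * Set the default weight for a blkcg: CGROUP_WEIGHT_DFL on the unified
 * (v2) hierarchy, BFQ_WEIGHT_LEGACY_DFL on the legacy one. This hook
 * also serves as the policy's cpd_bind_fn, so the default is restored
 * whenever the controller is re-bound to a hierarchy.
 */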
static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
					struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update the chain of bfq_groups as we might be handling a leaf
	 * group which, along with some of its relatives, has not been
	 * hooked yet to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		bfqg = container_of(entity, struct bfq_group, entity);
		if (bfqg != bfqd->root_group) {
			parent = bfqg_parent(bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg  */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @blkcg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and getting a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						struct bfq_io_cq *bic,
						struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed.  The condition may trigger
	 * spuriously on a newly created bic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
			bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues belonging
		 * to exited tasks, because they never migrated to a
		 * different cgroup from the one being destroyed now.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
	 * that they don't get lost.  If IOs complete after this point, the
	 * stats for them will be lost.  Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (!bfqg)
			continue;
		/*
		 * Setting the prio_changed flag of the entity
		 * to 1 with new_weight == weight would re-set
		 * the value of the weight to its ioprio mapping.
		 * Set the flag only if necessary.
		 */
		if ((unsigned short)val != bfqg->entity.new_weight) {
			bfqg->entity.new_weight = (unsigned short)val;
			/*
			 * Make sure that the above new value has been
			 * stored in bfqg->entity.new_weight before
			 * setting the prio_changed flag. In fact,
			 * this flag may be read asynchronously (in
			 * critical sections protected by a different
			 * lock than that held here), and finding this
			 * flag set may cause the execution of the code
			 * for updating parameters whose value may
			 * depend also on bfqg->entity.new_weight (in
			 * __bfq_entity_update_weight_prio).
			 * This barrier makes sure that the new value
			 * of bfqg->entity.new_weight is correctly
			 * seen in that code.
			 */
			smp_wmb();
			bfqg->entity.prio_changed = 1;
		}
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

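/*
 * Write handler for the cgroup-v2 weight file. With the io controller
 * enabled, the weight can be set from userspace with, e.g. (path shown
 * for illustration):
 *
 *	echo 300 > /sys/fs/cgroup/<grp>/io.bfq.weight
 */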
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	u64 weight;
	/* First unsigned long found in the file is used */
	int ret = kstrtoull(strim(buf), 0, &weight);

	if (ret)
		return ret;

	ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
	return ret ?: nbytes;
}

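/*
 * The helpers below only format and print the statistics gathered
 * earlier in this file; they back the debugging bfq.* files declared
 * in bfq_blkcg_legacy_files[] further down.
 */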
#ifdef CONFIG_DEBUG_BLK_CGROUP
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
					  &blkcg_policy_bfq, off);
	return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
							   &blkcg_policy_bfq,
							   off);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

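/*
 * stat_bytes is maintained in bytes by blk-cgroup; the shift by 9
 * below converts it to 512-byte sectors for the bfq.sectors file.
 */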
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

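/*
 * The average queue size is reported as avg_queue_size_sum /
 * avg_queue_size_samples, both of which are updated in
 * bfqg_stats_update_avg_queue_size() above.
 */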
static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */

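/*
 * Activate the bfq blk-cgroup policy on @bfqd's request queue, which
 * allocates a bfq_group for each existing blkg, and return the group
 * attached to the root blkg.
 */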
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

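/*
 * Glue between BFQ and blk-cgroup: the cpd_* hooks manage the
 * per-blkcg data (struct bfq_group_data), while the pd_* hooks manage
 * the per-(blkcg, request_queue) data (struct bfq_group).
 */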
struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write_u64 = bfq_io_set_weight_legacy,
	},

	/* statistics, covering only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "bfq.io_serviced",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_DEBUG_BLK_CGROUP */

	/* the same statistics, covering the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios_recursive,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
	{ }	/* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */