/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH     (INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,
        BLKG_RWSTAT_DISCARD,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state      css;
        spinlock_t                      lock;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq __rcu           *blkg_hint;
        struct hlist_head               blkg_list;

        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];

        struct list_head                all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
        refcount_t                      cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children.
 */
struct blkg_rwstat {
        struct percpu_counter           cpu_cnt[BLKG_RWSTAT_NR];
        atomic64_t                      aux_cnt[BLKG_RWSTAT_NR];
};

struct blkg_rwstat_sample {
        u64                             cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
};
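
/*
 * Illustrative sketch, not part of this interface: a policy usually wraps
 * blkg_policy_data in its own per-blkg structure and converts between the
 * two with container_of().  The names below (struct my_blkg_data,
 * pd_to_my_data()) are hypothetical.
 *
 *      struct my_blkg_data {
 *              struct blkg_policy_data pd;     // must be the first member
 *              u64 bytes_dispatched;
 *      };
 *
 *      static struct my_blkg_data *pd_to_my_data(struct blkg_policy_data *pd)
 *      {
 *              return pd ? container_of(pd, struct my_blkg_data, pd) : NULL;
 *      }
 */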

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg                    *blkcg;
        int                             plid;
};
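
/*
 * Illustrative sketch (hypothetical names): per-blkcg data follows the same
 * embedding pattern as blkg_policy_data above.
 *
 *      struct my_blkcg_data {
 *              struct blkcg_policy_data cpd;   // embedded at the beginning
 *              unsigned int default_weight;
 *      };
 *
 *      static struct my_blkcg_data *cpd_to_my_data(struct blkcg_policy_data *cpd)
 *      {
 *              return cpd ? container_of(cpd, struct my_blkcg_data, cpd) : NULL;
 *      }
 */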

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested  *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq                 *parent;

        /* reference count */
        struct percpu_ref               refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;

        struct blkg_rwstat              stat_bytes;
        struct blkg_rwstat              stat_ios;

        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];

        spinlock_t                      async_bio_lock;
        struct bio_list                 async_bios;
        struct work_struct              async_bio_work;

        atomic_t                        use_delay;
        atomic64_t                      delay_nsec;
        atomic64_t                      delay_start;
        u64                             last_delay;
        int                             last_use;

        struct rcu_head                 rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
                                struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
                                      size_t size);

struct blkcg_policy {
        int                             plid;
        /* cgroup files for the policy */
        struct cftype                   *dfl_cftypes;
        struct cftype                   *legacy_cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn          *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn           *cpd_init_fn;
        blkcg_pol_free_cpd_fn           *cpd_free_fn;
        blkcg_pol_bind_cpd_fn           *cpd_bind_fn;

        blkcg_pol_alloc_pd_fn           *pd_alloc_fn;
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_online_pd_fn          *pd_online_fn;
        blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_free_pd_fn            *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
        blkcg_pol_stat_pd_fn            *pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
                                      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);
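
/*
 * Illustrative sketch (hypothetical my_* names): a policy is typically
 * defined statically and registered at module init; plid is assigned by
 * blkcg_policy_register().
 *
 *      static struct blkcg_policy my_policy = {
 *              .pd_alloc_fn    = my_pd_alloc,
 *              .pd_init_fn     = my_pd_init,
 *              .pd_free_fn     = my_pd_free,
 *      };
 *
 *      static int __init my_policy_init(void)
 *      {
 *              return blkcg_policy_register(&my_policy);
 *      }
 *
 *      static void __exit my_policy_exit(void)
 *      {
 *              blkcg_policy_unregister(&my_policy);
 *      }
 */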

static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
                                           unsigned int idx)
{
        return atomic64_read(&rwstat->aux_cnt[idx]) +
                percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
}

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat_sample *rwstat);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
                               int off, struct blkg_rwstat_sample *sum);

struct blkg_conf_ctx {
        struct gendisk                  *disk;
        struct blkcg_gq                 *blkg;
        char                            *body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
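
/*
 * Illustrative sketch (my_policy and my_set_limit are hypothetical): a
 * policy's cgroup file write handler typically brackets its work with
 * blkg_conf_prep() / blkg_conf_finish().
 *
 *      static ssize_t my_set_limit(struct kernfs_open_file *of, char *buf,
 *                                  size_t nbytes, loff_t off)
 *      {
 *              struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *              struct blkg_conf_ctx ctx;
 *              int ret;
 *
 *              ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *              if (ret)
 *                      return ret;
 *              // ctx.blkg and ctx.body are valid here; parse and apply
 *              blkg_conf_finish(&ctx);
 *              return nbytes;
 *      }
 */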

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
        struct cgroup_subsys_state *css;

        css = kthread_blkcg();
        if (css)
                return css;
        return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_blkg)
                return bio->bi_blkg->blkcg;
        return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_blkg)
                return bio->bi_blkg->blkcg;
        return NULL;
}

static inline bool blk_cgroup_congested(void)
{
        struct cgroup_subsys_state *css;
        bool ret = false;

        rcu_read_lock();
        css = kthread_blkcg();
        if (!css)
                css = task_css(current, io_cgrp_id);
        while (css) {
                if (atomic_read(&css->cgroup->congestion_count)) {
                        ret = true;
                        break;
                }
                css = css->parent;
        }
        rcu_read_unlock();
        return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
        return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return __blkg_lookup(blkcg, q, false);
}
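
/*
 * Illustrative sketch: the result of a lookup is only stable inside the RCU
 * read-side section unless a reference is taken, e.g.:
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(blkcg, q);
 *      if (blkg && blkg_tryget(blkg)) {
 *              // blkg may be used after rcu_read_unlock(); drop with blkg_put()
 *      }
 *      rcu_read_unlock();
 */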

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
        return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
        refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
        if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
                blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
        /* wb isn't being accounted, so trigger destruction right away */
        blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
        return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called rcu protected.  As the failure mode here is to walk
 * up the blkg tree, this ensures that the blkg->parent pointers are always
 * valid.  This returns the blkg that it ended up taking a reference on or %NULL
 * if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
        struct blkcg_gq *ret_blkg = NULL;

        WARN_ON_ONCE(!rcu_read_lock_held());

        while (blkg) {
                if (blkg_tryget(blkg)) {
                        ret_blkg = blkg;
                        break;
                }
                blkg = blkg->parent;
        }

        return ret_blkg;
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))
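
/*
 * Illustrative sketch (my_update_limits() is hypothetical): walking all
 * online descendants under the queue lock, e.g. to propagate a
 * configuration change down the hierarchy.
 *
 *      struct cgroup_subsys_state *pos_css;
 *      struct blkcg_gq *blkg;
 *
 *      rcu_read_lock();
 *      spin_lock_irq(&q->queue_lock);
 *      blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg)
 *              my_update_limits(blkg);
 *      spin_unlock_irq(&q->queue_lock);
 *      rcu_read_unlock();
 */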

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)         \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
        int i, ret;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
                if (ret) {
                        while (--i >= 0)
                                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
                        return ret;
                }
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
        return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   unsigned int op, uint64_t val)
{
        struct percpu_counter *cnt;

        if (op_is_discard(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
        else if (op_is_write(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

        percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

        if (op_is_sync(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

        percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 * @result: where to store the current values
 *
 * Read the current snapshot of @rwstat into @result.
 */
static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
                                    struct blkg_rwstat_sample *result)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                result->cnt[i] =
                        percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat_sample tmp = { };

        blkg_rwstat_read(rwstat, &tmp);
        return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                percpu_counter_set(&rwstat->cpu_cnt[i], 0);
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
                                       struct blkg_rwstat *from)
{
        u64 sum[BLKG_RWSTAT_NR];
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
                             &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                           struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                                  struct bio *bio) { return false; }
#endif

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
        if (bio->bi_opf & REQ_CGROUP_PUNT)
                return __blkcg_punt_bio_submit(bio);
        else
                return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
        bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
{
        struct blkcg_gq *blkg;
        bool throtl = false;

        rcu_read_lock();

        if (!bio->bi_blkg) {
                char b[BDEVNAME_SIZE];

                WARN_ONCE(1,
                          "no blkg associated for bio on block-device: %s\n",
                          bio_devname(bio, b));
                bio_associate_blkg(bio);
        }

        blkg = bio->bi_blkg;

        throtl = blk_throtl_bio(q, blkg, bio);

        if (!throtl) {
                /*
                 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
                 * is a split bio and we would have already accounted for the
                 * size of the bio.
                 */
                if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
                        blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
                                        bio->bi_iter.bi_size);
                blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
        }

        blkcg_bio_issue_init(bio);

        rcu_read_unlock();
        return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
        if (atomic_add_return(1, &blkg->use_delay) == 1)
                atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
        int old = atomic_read(&blkg->use_delay);

        if (old == 0)
                return 0;

        /*
         * We do this song and dance because we can race with somebody else
         * adding or removing delay.  If we just did an atomic_dec we'd end up
         * negative and we'd already be in trouble.  We need to subtract 1 and
         * then check to see if we were the last delay so we can drop the
         * congestion count on the cgroup.
         */
        while (old) {
                int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
                if (cur == old)
                        break;
                old = cur;
        }

        if (old == 0)
                return 0;
        if (old == 1)
                atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
        return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
        int old = atomic_read(&blkg->use_delay);

        if (!old)
                return;

        /* We only want 1 person clearing the congestion count for this blkg. */
        while (old) {
                int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
                if (cur == old) {
                        atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
                        break;
                }
                old = cur;
        }
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */