Lines matching refs:blkg. Each hit below shows the file line number, the source line, the enclosing function, and whether blkg is an argument or a local in that function; the code is from block/blk-cgroup.c.

73 static void blkg_free(struct blkcg_gq *blkg)  in blkg_free()  argument
77 if (!blkg) in blkg_free()
81 if (blkg->pd[i]) in blkg_free()
82 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); in blkg_free()
84 free_percpu(blkg->iostat_cpu); in blkg_free()
85 percpu_ref_exit(&blkg->refcnt); in blkg_free()
86 kfree(blkg); in blkg_free()
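
The blkg_free() lines above show the teardown order: per-policy data goes first through each registered policy's pd_free_fn, then the shared per-cpu stats and the percpu ref, then the struct itself, and a NULL argument is tolerated so error paths can call it unconditionally. A minimal userspace sketch of that shape, with hypothetical pol/gq types standing in for blkcg_policy and blkcg_gq:

    #include <stdlib.h>

    #define NPOL 4

    struct pol { void (*pd_free_fn)(void *pd); };   /* blkcg_policy stand-in */
    struct gq  { void *pd[NPOL]; void *iostat; };   /* blkcg_gq stand-in */

    static struct pol *pols[NPOL];                  /* registered policies */

    static void gq_free(struct gq *g)
    {
            if (!g)
                    return;                 /* like blkg_free(), tolerate NULL */
            for (int i = 0; i < NPOL; i++)
                    if (g->pd[i])           /* only attached policy data */
                            pols[i]->pd_free_fn(g->pd[i]);
            free(g->iostat);                /* free_percpu(blkg->iostat_cpu) */
            free(g);                        /* kfree(blkg) */
    }
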
91 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); in __blkg_release() local
93 WARN_ON(!bio_list_empty(&blkg->async_bios)); in __blkg_release()
96 css_put(&blkg->blkcg->css); in __blkg_release()
97 if (blkg->parent) in __blkg_release()
98 blkg_put(blkg->parent); in __blkg_release()
99 blkg_free(blkg); in __blkg_release()
112 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt); in blkg_release() local
114 call_rcu(&blkg->rcu_head, __blkg_release); in blkg_release()
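
blkg_release() and __blkg_release() together implement a two-stage release: the refcount release callback at 112-114 frees nothing itself, it only queues __blkg_release() through call_rcu(), so lockless readers still dereferencing the blkg under rcu_read_lock() stay safe; only the RCU callback (91-99) drops the cgroup and parent references and frees. A rough userspace analogue of the shape (the grace period is faked with a direct call; obj is hypothetical):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj { atomic_int refcnt; };

    static void obj_free_rcu(struct obj *o) { free(o); } /* __blkg_release() */

    /* call_rcu() runs the callback only after every pre-existing RCU
     * reader has finished; here we just invoke it immediately. */
    static void fake_call_rcu(struct obj *o, void (*cb)(struct obj *))
    {
            cb(o);
    }

    static void obj_put(struct obj *o)                   /* blkg_put() */
    {
            if (atomic_fetch_sub_explicit(&o->refcnt, 1,
                                          memory_order_acq_rel) == 1)
                    fake_call_rcu(o, obj_free_rcu);      /* blkg_release() */
    }
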
119 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, in blkg_async_bio_workfn() local
127 spin_lock_bh(&blkg->async_bio_lock); in blkg_async_bio_workfn()
128 bio_list_merge(&bios, &blkg->async_bios); in blkg_async_bio_workfn()
129 bio_list_init(&blkg->async_bios); in blkg_async_bio_workfn()
130 spin_unlock_bh(&blkg->async_bio_lock); in blkg_async_bio_workfn()
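
Lines 127-130 are the classic splice-under-lock shape: hold the submitter lock only long enough to steal the entire pending bio list into a local one, then submit with the lock dropped. A userspace analogue with a pthread mutex and a hypothetical singly linked node type:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };
    struct punt { pthread_mutex_t lock; struct node *pending; };

    static void worker(struct punt *p, void (*process)(struct node *))
    {
            /* steal the whole list under the lock ... */
            pthread_mutex_lock(&p->lock);
            struct node *list = p->pending; /* bio_list_merge(&bios, ...) */
            p->pending = NULL;              /* bio_list_init(&blkg->async_bios) */
            pthread_mutex_unlock(&p->lock);

            /* ... then process it with the lock dropped */
            while (list) {
                    struct node *next = list->next;
                    process(list);          /* submit_bio() in the real workfn */
                    list = next;
            }
    }
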
154 struct blkcg_gq *blkg; in blkg_alloc() local
158 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
159 if (!blkg) in blkg_alloc()
162 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask)) in blkg_alloc()
165 blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask); in blkg_alloc()
166 if (!blkg->iostat_cpu) in blkg_alloc()
169 blkg->q = q; in blkg_alloc()
170 INIT_LIST_HEAD(&blkg->q_node); in blkg_alloc()
171 spin_lock_init(&blkg->async_bio_lock); in blkg_alloc()
172 bio_list_init(&blkg->async_bios); in blkg_alloc()
173 INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn); in blkg_alloc()
174 blkg->blkcg = blkcg; in blkg_alloc()
176 u64_stats_init(&blkg->iostat.sync); in blkg_alloc()
178 u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync); in blkg_alloc()
192 blkg->pd[i] = pd; in blkg_alloc()
193 pd->blkg = blkg; in blkg_alloc()
197 return blkg; in blkg_alloc()
200 blkg_free(blkg); in blkg_alloc()
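
blkg_alloc() (154-200) is the usual allocate-then-unwind shape: kzalloc, then each fallible step (percpu_ref_init, alloc_percpu_gfp, the per-policy pd_alloc_fn loop) bails to one error label, and the error path just calls blkg_free(), which copes with partial initialization. Sketch of that shape, reusing the hypothetical gq/gq_free() from the sketch above:

    static struct gq *gq_alloc(void)
    {
            struct gq *g = calloc(1, sizeof(*g));   /* kzalloc_node() */
            if (!g)
                    return NULL;

            g->iostat = calloc(1, 64);              /* alloc_percpu_gfp() */
            if (!g->iostat)
                    goto err_free;

            /* per-policy pd allocations would go here, jumping to
             * err_free on failure like the pd_alloc_fn() loop */
            return g;

    err_free:
            gq_free(g);                             /* tolerates partial init */
            return NULL;
    }
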
207 struct blkcg_gq *blkg; in blkg_lookup_slowpath() local
215 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in blkg_lookup_slowpath()
216 if (blkg && blkg->q == q) { in blkg_lookup_slowpath()
219 rcu_assign_pointer(blkcg->blkg_hint, blkg); in blkg_lookup_slowpath()
221 return blkg; in blkg_lookup_slowpath()
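
blkg_lookup_slowpath() (207-221) shows the hint-caching idiom: after a successful radix-tree walk it publishes the hit into blkcg->blkg_hint with rcu_assign_pointer(), so the next lookup for the same queue can take the fast path. A C11 sketch of the idiom, where a release store plays the role of rcu_assign_pointer() and entry/cache are hypothetical:

    #include <stdatomic.h>
    #include <stddef.h>

    struct entry { int key; };
    struct cache { _Atomic(struct entry *) hint; }; /* blkcg->blkg_hint */

    static struct entry *slow_lookup(struct cache *c, int key,
                                     struct entry *(*tree_lookup)(int))
    {
            struct entry *e = tree_lookup(key);     /* radix_tree_lookup() */
            if (e && e->key == key) {
                    /* release store = rcu_assign_pointer(): a reader that
                     * sees the hint also sees the initialized entry */
                    atomic_store_explicit(&c->hint, e, memory_order_release);
                    return e;
            }
            return NULL;
    }
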
236 struct blkcg_gq *blkg; in blkg_create() local
262 blkg = new_blkg; in blkg_create()
266 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); in blkg_create()
267 if (WARN_ON_ONCE(!blkg->parent)) { in blkg_create()
271 blkg_get(blkg->parent); in blkg_create()
278 if (blkg->pd[i] && pol->pd_init_fn) in blkg_create()
279 pol->pd_init_fn(blkg->pd[i]); in blkg_create()
284 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); in blkg_create()
286 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); in blkg_create()
287 list_add(&blkg->q_node, &q->blkg_list); in blkg_create()
292 if (blkg->pd[i] && pol->pd_online_fn) in blkg_create()
293 pol->pd_online_fn(blkg->pd[i]); in blkg_create()
296 blkg->online = true; in blkg_create()
300 return blkg; in blkg_create()
303 blkg_put(blkg); in blkg_create()
329 struct blkcg_gq *blkg; in blkg_lookup_create() local
334 blkg = blkg_lookup(blkcg, q); in blkg_lookup_create()
335 if (blkg) in blkg_lookup_create()
336 return blkg; in blkg_lookup_create()
339 blkg = __blkg_lookup(blkcg, q, true); in blkg_lookup_create()
340 if (blkg) in blkg_lookup_create()
354 blkg = __blkg_lookup(parent, q, false); in blkg_lookup_create()
355 if (blkg) { in blkg_lookup_create()
357 ret_blkg = blkg; in blkg_lookup_create()
364 blkg = blkg_create(pos, q, NULL); in blkg_lookup_create()
365 if (IS_ERR(blkg)) { in blkg_lookup_create()
366 blkg = ret_blkg; in blkg_lookup_create()
375 return blkg; in blkg_lookup_create()
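
blkg_lookup_create() (329-375) resolves misses top-down: find the closest existing ancestor (the __blkg_lookup(parent, q, false) probe at 354), create one level per iteration toward the requested cgroup, and on failure fall back to the deepest blkg that does exist (the ret_blkg assignments at 357 and 366). A simplified sketch of that loop over a hypothetical cgroup hierarchy:

    struct cg { struct cg *parent; };
    struct cg_obj;  /* opaque per-(cgroup, queue) object, blkcg_gq stand-in */

    static struct cg_obj *lookup_create(struct cg *target,
                                        struct cg_obj *(*lookup)(struct cg *),
                                        struct cg_obj *(*create)(struct cg *))
    {
            struct cg_obj *ret = NULL; /* the kernel starts from q->root_blkg */

            for (;;) {
                    struct cg *pos = target;
                    struct cg *parent = pos->parent;

                    /* back up to just below the closest existing ancestor */
                    while (parent && !lookup(parent)) {
                            pos = parent;
                            parent = parent->parent;
                    }

                    struct cg_obj *obj = create(pos); /* blkg_create(pos, q, NULL) */
                    if (!obj)
                            return ret;     /* closest one that exists */
                    ret = obj;
                    if (pos == target)
                            return obj;     /* reached the leaf */
            }
    }
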
378 static void blkg_destroy(struct blkcg_gq *blkg) in blkg_destroy() argument
380 struct blkcg *blkcg = blkg->blkcg; in blkg_destroy()
383 lockdep_assert_held(&blkg->q->queue_lock); in blkg_destroy()
387 WARN_ON_ONCE(list_empty(&blkg->q_node)); in blkg_destroy()
388 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); in blkg_destroy()
393 if (blkg->pd[i] && pol->pd_offline_fn) in blkg_destroy()
394 pol->pd_offline_fn(blkg->pd[i]); in blkg_destroy()
397 blkg->online = false; in blkg_destroy()
399 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); in blkg_destroy()
400 list_del_init(&blkg->q_node); in blkg_destroy()
401 hlist_del_init_rcu(&blkg->blkcg_node); in blkg_destroy()
408 if (rcu_access_pointer(blkcg->blkg_hint) == blkg) in blkg_destroy()
415 percpu_ref_kill(&blkg->refcnt); in blkg_destroy()
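
blkg_destroy() (378-415) is the mirror image of create and shows the standard two-phase kill: offline the policies and unlink the blkg from the radix tree and both lists first, so no new lookup can find it (including invalidating the cached blkg_hint, the check at 408), and only then does percpu_ref_kill() drop the initial reference; in-flight holders keep the blkg alive until the release path above runs. Sketch of the unpublish-then-unref order, with hypothetical callbacks:

    #include <stdatomic.h>
    #include <stddef.h>

    struct obj2 { atomic_int refcnt; _Atomic(struct obj2 *) *hint_slot; };

    static void destroy(struct obj2 *o, void (*unpublish)(struct obj2 *),
                        void (*put)(struct obj2 *))
    {
            unpublish(o);   /* radix_tree_delete() + list_del_init():
                             * no new lookups can reach the object */
            if (o->hint_slot &&
                atomic_load_explicit(o->hint_slot, memory_order_relaxed) == o)
                    atomic_store_explicit(o->hint_slot, NULL,
                                          memory_order_relaxed);
            put(o);         /* percpu_ref_kill(): drop the initial ref only;
                             * in-flight holders still pin the object */
    }
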
426 struct blkcg_gq *blkg, *n; in blkg_destroy_all() local
431 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { in blkg_destroy_all()
432 struct blkcg *blkcg = blkg->blkcg; in blkg_destroy_all()
435 blkg_destroy(blkg); in blkg_destroy_all()
458 struct blkcg_gq *blkg; in blkcg_reset_stats() local
469 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_reset_stats()
472 per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_reset_stats()
475 memset(&blkg->iostat, 0, sizeof(blkg->iostat)); in blkcg_reset_stats()
480 if (blkg->pd[i] && pol->pd_reset_stats_fn) in blkcg_reset_stats()
481 pol->pd_reset_stats_fn(blkg->pd[i]); in blkcg_reset_stats()
490 const char *blkg_dev_name(struct blkcg_gq *blkg) in blkg_dev_name() argument
492 if (!blkg->q->disk || !blkg->q->disk->bdi->dev) in blkg_dev_name()
494 return bdi_dev_name(blkg->q->disk->bdi); in blkg_dev_name()
521 struct blkcg_gq *blkg; in blkcg_print_blkgs() local
525 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_print_blkgs()
526 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
527 if (blkcg_policy_enabled(blkg->q, pol)) in blkcg_print_blkgs()
528 total += prfill(sf, blkg->pd[pol->plid], data); in blkcg_print_blkgs()
529 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
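
blkcg_print_blkgs() (521-529) mixes two protection schemes: the blkg list is walked under RCU, but each blkg's queue lock is taken around the prfill callback so the per-policy data stays stable while it is printed. A userspace sketch of that mixed walk, with the RCU side reduced to comments:

    #include <pthread.h>
    #include <stddef.h>

    struct qnode {
            struct qnode *next;          /* RCU-protected list in the kernel */
            pthread_mutex_t *queue_lock; /* per-device, not per-cgroup */
    };

    static unsigned long long
    walk(struct qnode *head, unsigned long long (*prfill)(struct qnode *))
    {
            unsigned long long total = 0;

            /* kernel: rcu_read_lock() keeps the traversal itself safe */
            for (struct qnode *n = head; n; n = n->next) {
                    pthread_mutex_lock(n->queue_lock);
                    total += prfill(n);  /* per-queue state is stable here */
                    pthread_mutex_unlock(n->queue_lock);
            }
            /* kernel: rcu_read_unlock() */
            return total;
    }
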
548 const char *dname = blkg_dev_name(pd->blkg); in __blkg_prfill_u64()
627 struct blkcg_gq *blkg; in blkg_conf_prep() local
639 blkg = blkg_lookup_check(blkcg, pol, q); in blkg_conf_prep()
640 if (IS_ERR(blkg)) { in blkg_conf_prep()
641 ret = PTR_ERR(blkg); in blkg_conf_prep()
645 if (blkg) in blkg_conf_prep()
682 blkg = blkg_lookup_check(pos, pol, q); in blkg_conf_prep()
683 if (IS_ERR(blkg)) { in blkg_conf_prep()
684 ret = PTR_ERR(blkg); in blkg_conf_prep()
689 if (blkg) { in blkg_conf_prep()
692 blkg = blkg_create(pos, q, new_blkg); in blkg_conf_prep()
693 if (IS_ERR(blkg)) { in blkg_conf_prep()
694 ret = PTR_ERR(blkg); in blkg_conf_prep()
706 ctx->blkg = blkg; in blkg_conf_prep()
780 struct blkcg_gq *blkg; in blkcg_rstat_flush() local
788 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_rstat_flush()
789 struct blkcg_gq *parent = blkg->parent; in blkcg_rstat_flush()
790 struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_rstat_flush()
802 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); in blkcg_rstat_flush()
805 blkg_iostat_add(&blkg->iostat.cur, &delta); in blkcg_rstat_flush()
807 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); in blkcg_rstat_flush()
812 blkg_iostat_set(&delta, &blkg->iostat.cur); in blkcg_rstat_flush()
813 blkg_iostat_sub(&delta, &blkg->iostat.last); in blkcg_rstat_flush()
815 blkg_iostat_add(&blkg->iostat.last, &delta); in blkcg_rstat_flush()
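
blkcg_rstat_flush() (780-815) is the per-cpu delta-propagation idiom: for each blkg, compute delta = cur - last from this CPU's counters, fold it into the blkg's aggregate inside its u64_stats write section, advance last, and then push the blkg's own delta one more level up to the parent. A userspace sketch of one fold step, with a plain mutex standing in for the u64_stats seqcount:

    #include <pthread.h>

    struct iostat { unsigned long long bytes, ios; };
    struct stats {
            pthread_mutex_t sync;       /* u64_stats sync stand-in */
            struct iostat cur, last;
    };

    static void iostat_sub(struct iostat *d, const struct iostat *s)
    { d->bytes -= s->bytes; d->ios -= s->ios; }

    static void iostat_add(struct iostat *d, const struct iostat *s)
    { d->bytes += s->bytes; d->ios += s->ios; }

    /* Fold what changed in src since the previous flush into dst. */
    static void propagate(struct stats *dst, struct stats *src)
    {
            struct iostat delta = src->cur; /* blkg_iostat_set(&delta, ...) */
            iostat_sub(&delta, &src->last); /* delta = cur - last */

            pthread_mutex_lock(&dst->sync); /* u64_stats_update_begin_irqsave */
            iostat_add(&dst->cur, &delta);
            pthread_mutex_unlock(&dst->sync);

            iostat_add(&src->last, &delta); /* remember what was pushed */
    }
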
843 struct blkcg_gq *blkg = in blkcg_fill_root_iostats() local
868 flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); in blkcg_fill_root_iostats()
869 blkg_iostat_set(&blkg->iostat.cur, &tmp); in blkcg_fill_root_iostats()
870 u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); in blkcg_fill_root_iostats()
875 static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s) in blkcg_print_one_stat() argument
877 struct blkg_iostat_set *bis = &blkg->iostat; in blkcg_print_one_stat()
884 if (!blkg->online) in blkcg_print_one_stat()
887 dname = blkg_dev_name(blkg); in blkcg_print_one_stat()
911 if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) { in blkcg_print_one_stat()
914 atomic_read(&blkg->use_delay), in blkcg_print_one_stat()
915 atomic64_read(&blkg->delay_nsec)); in blkcg_print_one_stat()
921 if (!blkg->pd[i] || !pol->pd_stat_fn) in blkcg_print_one_stat()
924 if (pol->pd_stat_fn(blkg->pd[i], s)) in blkcg_print_one_stat()
935 struct blkcg_gq *blkg; in blkcg_print_stat() local
943 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_print_stat()
944 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
945 blkcg_print_one_stat(blkg, sf); in blkcg_print_stat()
946 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
1026 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, in blkcg_destroy_blkgs() local
1028 struct request_queue *q = blkg->q; in blkcg_destroy_blkgs()
1042 blkg_destroy(blkg); in blkcg_destroy_blkgs()
1162 struct blkcg_gq *new_blkg, *blkg; in blkcg_init_queue() local
1175 blkg = blkg_create(&blkcg_root, q, new_blkg); in blkcg_init_queue()
1176 if (IS_ERR(blkg)) in blkcg_init_queue()
1178 q->root_blkg = blkg; in blkcg_init_queue()
1209 return PTR_ERR(blkg); in blkcg_init_queue()
1293 struct blkcg_gq *blkg, *pinned_blkg = NULL; in blkcg_activate_policy() local
1305 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1308 if (blkg->pd[pol->plid]) in blkcg_activate_policy()
1312 if (blkg == pinned_blkg) { in blkcg_activate_policy()
1317 blkg->blkcg); in blkcg_activate_policy()
1327 blkg_get(blkg); in blkcg_activate_policy()
1328 pinned_blkg = blkg; in blkcg_activate_policy()
1335 blkg->blkcg); in blkcg_activate_policy()
1342 blkg->pd[pol->plid] = pd; in blkcg_activate_policy()
1343 pd->blkg = blkg; in blkcg_activate_policy()
1349 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) in blkcg_activate_policy()
1350 pol->pd_init_fn(blkg->pd[pol->plid]); in blkcg_activate_policy()
1368 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1369 struct blkcg *blkcg = blkg->blkcg; in blkcg_activate_policy()
1372 if (blkg->pd[pol->plid]) { in blkcg_activate_policy()
1373 pol->pd_free_fn(blkg->pd[pol->plid]); in blkcg_activate_policy()
1374 blkg->pd[pol->plid] = NULL; in blkcg_activate_policy()
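
blkcg_activate_policy() (1293-1374) allocates per-blkg policy data while holding the queue lock, so it first tries a non-blocking allocation and, if that fails, pins the blkg it was visiting (the pinned_blkg bookkeeping at 1312-1328), drops the lock, allocates with a blocking GFP, retakes the lock and restarts the walk; the error path at 1368-1374 frees every pd it managed to install. A sketch of the drop-lock-and-retry idiom with a mutex and hypothetical allocators:

    #include <pthread.h>
    #include <stddef.h>

    /* Try a cheap allocation under the lock; on failure drop the lock,
     * allocate blocking, retake the lock. The caller must treat a return
     * after the lock was dropped as a restart, because its iteration
     * state is stale, just as blkcg_activate_policy() re-walks the list. */
    static void *alloc_under_lock(pthread_mutex_t *lock,
                                  void *(*alloc_atomic)(void),
                                  void *(*alloc_blocking)(void))
    {
            void *p = alloc_atomic();       /* GFP_NOWAIT attempt */
            if (p)
                    return p;

            pthread_mutex_unlock(lock);     /* spin_unlock_irq(queue_lock) */
            p = alloc_blocking();           /* may sleep, GFP_KERNEL */
            pthread_mutex_lock(lock);
            return p;
    }
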
1395 struct blkcg_gq *blkg; in blkcg_deactivate_policy() local
1407 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_deactivate_policy()
1408 struct blkcg *blkcg = blkg->blkcg; in blkcg_deactivate_policy()
1411 if (blkg->pd[pol->plid]) { in blkcg_deactivate_policy()
1413 pol->pd_offline_fn(blkg->pd[pol->plid]); in blkcg_deactivate_policy()
1414 pol->pd_free_fn(blkg->pd[pol->plid]); in blkcg_deactivate_policy()
1415 blkg->pd[pol->plid] = NULL; in blkcg_deactivate_policy()
1549 struct blkcg_gq *blkg = bio->bi_blkg; in __blkcg_punt_bio_submit() local
1555 if (!blkg->parent) in __blkcg_punt_bio_submit()
1558 spin_lock_bh(&blkg->async_bio_lock); in __blkcg_punt_bio_submit()
1559 bio_list_add(&blkg->async_bios, bio); in __blkcg_punt_bio_submit()
1560 spin_unlock_bh(&blkg->async_bio_lock); in __blkcg_punt_bio_submit()
1562 queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work); in __blkcg_punt_bio_submit()
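
__blkcg_punt_bio_submit() (1549-1562) is the producer half of the worker at 127-130: add the bio under the same bottom-half lock, then kick the workqueue; the check at 1555 bails out when the blkg has no parent. Producer side of the earlier splice sketch, reusing its hypothetical punt/node types (list order simplified to LIFO here; bio_list_add() appends at the tail):

    static void punt_submit(struct punt *p, struct node *n,
                            void (*kick)(struct punt *))
    {
            pthread_mutex_lock(&p->lock);   /* spin_lock_bh(async_bio_lock) */
            n->next = p->pending;           /* bio_list_add(&blkg->async_bios) */
            p->pending = n;
            pthread_mutex_unlock(&p->lock);

            kick(p);                        /* queue_work(blkcg_punt_bio_wq,
                                             * &blkg->async_bio_work) */
    }
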
1572 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) in blkcg_scale_delay() argument
1574 u64 old = atomic64_read(&blkg->delay_start); in blkcg_scale_delay()
1577 if (atomic_read(&blkg->use_delay) < 0) in blkcg_scale_delay()
1594 atomic64_cmpxchg(&blkg->delay_start, old, now) == old) { in blkcg_scale_delay()
1595 u64 cur = atomic64_read(&blkg->delay_nsec); in blkcg_scale_delay()
1596 u64 sub = min_t(u64, blkg->last_delay, now - old); in blkcg_scale_delay()
1597 int cur_use = atomic_read(&blkg->use_delay); in blkcg_scale_delay()
1603 if (cur_use < blkg->last_use) in blkcg_scale_delay()
1604 sub = max_t(u64, sub, blkg->last_delay >> 1); in blkcg_scale_delay()
1613 atomic64_set(&blkg->delay_nsec, 0); in blkcg_scale_delay()
1614 blkg->last_delay = 0; in blkcg_scale_delay()
1616 atomic64_sub(sub, &blkg->delay_nsec); in blkcg_scale_delay()
1617 blkg->last_delay = cur - sub; in blkcg_scale_delay()
1619 blkg->last_use = cur_use; in blkcg_scale_delay()
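
blkcg_scale_delay() (1572-1619) decays the accumulated delay debt over time: a cmpxchg on delay_start elects one CPU per window, which forgives at most the elapsed wall time worth of debt, or at least half of the previous debt once use_delay has dropped (1603-1604), clearing everything if the subtraction would underflow. A compressed single-threaded sketch of just the arithmetic, with hypothetical fields:

    struct delay {
            unsigned long long delay_nsec;  /* accumulated debt */
            unsigned long long last_delay;  /* debt at the previous decay */
            int use_delay, last_use;
    };

    static void scale_delay(struct delay *d, unsigned long long elapsed_ns)
    {
            unsigned long long cur = d->delay_nsec;
            /* forgive at most the elapsed wall time per window ... */
            unsigned long long sub = d->last_delay < elapsed_ns ?
                                     d->last_delay : elapsed_ns;

            /* ... but if pressure dropped, at least half the old debt */
            if (d->use_delay < d->last_use && sub < (d->last_delay >> 1))
                    sub = d->last_delay >> 1;

            if (sub >= cur) {               /* would underflow: clear it */
                    d->delay_nsec = 0;
                    d->last_delay = 0;
            } else {
                    d->delay_nsec = cur - sub;
                    d->last_delay = cur - sub;
            }
            d->last_use = d->use_delay;
    }
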
1629 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) in blkcg_maybe_throttle_blkg() argument
1638 while (blkg->parent) { in blkcg_maybe_throttle_blkg()
1639 int use_delay = atomic_read(&blkg->use_delay); in blkcg_maybe_throttle_blkg()
1644 blkcg_scale_delay(blkg, now); in blkcg_maybe_throttle_blkg()
1645 this_delay = atomic64_read(&blkg->delay_nsec); in blkcg_maybe_throttle_blkg()
1651 blkg = blkg->parent; in blkcg_maybe_throttle_blkg()
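
blkcg_maybe_throttle_blkg() (1629-1651) walks from the bio's blkg up to the root, decaying each ancestor's delay and keeping the largest pending value; the caller then sleeps once for that amount. Sketch of the ancestor walk, reusing the struct delay sketch above plus a hypothetical parent pointer:

    struct dnode { struct dnode *parent; struct delay d; };

    static unsigned long long
    max_ancestor_delay(struct dnode *n, unsigned long long elapsed_ns)
    {
            unsigned long long max = 0;

            while (n->parent) {             /* the root is never throttled */
                    if (n->d.use_delay) {
                            scale_delay(&n->d, elapsed_ns); /* decay first */
                            if (n->d.delay_nsec > max)
                                    max = n->d.delay_nsec;
                    }
                    n = n->parent;
            }
            return max;
    }
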
1700 struct blkcg_gq *blkg; in blkcg_maybe_throttle_current() local
1718 blkg = blkg_lookup(blkcg, q); in blkcg_maybe_throttle_current()
1719 if (!blkg) in blkcg_maybe_throttle_current()
1721 if (!blkg_tryget(blkg)) in blkcg_maybe_throttle_current()
1725 blkcg_maybe_throttle_blkg(blkg, use_memdelay); in blkcg_maybe_throttle_current()
1726 blkg_put(blkg); in blkcg_maybe_throttle_current()
1779 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) in blkcg_add_delay() argument
1781 if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) in blkcg_add_delay()
1783 blkcg_scale_delay(blkg, now); in blkcg_add_delay()
1784 atomic64_add(delta, &blkg->delay_nsec); in blkcg_add_delay()
1799 struct blkcg_gq *blkg, *ret_blkg = NULL; in blkg_tryget_closest() local
1802 blkg = blkg_lookup_create(css_to_blkcg(css), in blkg_tryget_closest()
1804 while (blkg) { in blkg_tryget_closest()
1805 if (blkg_tryget(blkg)) { in blkg_tryget_closest()
1806 ret_blkg = blkg; in blkg_tryget_closest()
1809 blkg = blkg->parent; in blkg_tryget_closest()
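
blkg_tryget_closest() (1799-1809) pairs lookup_create with a fallback walk: if the exact blkg's ref cannot be taken because it is mid-teardown, fall back to the nearest ancestor whose ref can, so the bio is always charged somewhere on the path. Sketch with a hypothetical tryget callback:

    #include <stdbool.h>
    #include <stddef.h>

    struct rnode { struct rnode *parent; };

    static struct rnode *tryget_closest(struct rnode *n,
                                        bool (*tryget)(struct rnode *))
    {
            while (n) {
                    if (tryget(n))  /* blkg_tryget(): fails while dying */
                            return n;
                    n = n->parent;  /* fall back one level */
            }
            return NULL;
    }
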