Lines Matching refs: bs (struct bio_set, block/bio.c)

113 static inline unsigned int bs_bio_slab_size(struct bio_set *bs) in bs_bio_slab_size() argument
115 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
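A bio_set is a private allocation context (mempools backed by a slab) that lets a block driver allocate bios without depending on shared system pools. Both lines of this helper appear in the listing, so it can be reassembled directly: a slab object for a bio_set is the struct bio itself plus the caller-specified front padding and the inline-bvec back padding.

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
        /* object layout: [front_pad][struct bio][back_pad] */
        return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}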
118 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) in bio_find_or_create_slab() argument
120 unsigned int size = bs_bio_slab_size(bs); in bio_find_or_create_slab()
136 static void bio_put_slab(struct bio_set *bs) in bio_put_slab() argument
139 unsigned int slab_size = bs_bio_slab_size(bs); in bio_put_slab()
147 WARN_ON_ONCE(bslab->slab != bs->bio_slab); in bio_put_slab()
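The matched lines show only the size computation and the sanity check, so the following is a simplified sketch of the lookup-or-create pattern these two functions implement: one refcounted kmem_cache is shared by every bio_set with the same object size. The bio_slab_lookup()/bio_slab_create()/bio_slab_remove() helpers and the struct layout are hypothetical stand-ins, and the real code serializes the table with a mutex, elided here.

struct bio_slab {                               /* assumed shape of a table entry */
        struct kmem_cache *slab;
        unsigned int slab_ref;
        unsigned int slab_size;
};

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
        unsigned int size = bs_bio_slab_size(bs);
        struct bio_slab *bslab = bio_slab_lookup(size);         /* hypothetical */

        if (!bslab)
                bslab = bio_slab_create(size);                  /* hypothetical */
        if (!bslab)
                return NULL;
        bslab->slab_ref++;
        return bslab->slab;
}

static void bio_put_slab(struct bio_set *bs)
{
        unsigned int slab_size = bs_bio_slab_size(bs);
        struct bio_slab *bslab = bio_slab_lookup(slab_size);    /* hypothetical */

        WARN_ON_ONCE(bslab->slab != bs->bio_slab);
        if (!--bslab->slab_ref) {
                kmem_cache_destroy(bslab->slab);
                bio_slab_remove(slab_size);                     /* hypothetical */
        }
}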
231 struct bio_set *bs = bio->bi_pool; in bio_free() local
234 WARN_ON_ONCE(!bs); in bio_free()
237 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
238 mempool_free(p - bs->front_pad, &bs->bio_pool); in bio_free()
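The free path (lines 231-238) is the mirror image of the allocation layout: the bio handed out sits front_pad bytes into the mempool element, so bio_free() must step back to the element's true start before returning it. A sketch, with the per-bio teardown between the WARN and the bvec free elided (it is not in the listing), and assuming bvec_free() is a no-op for inline vectors:

static void bio_free(struct bio *bio)
{
        struct bio_set *bs = bio->bi_pool;
        void *p = bio;

        WARN_ON_ONCE(!bs);

        /* ... per-bio teardown elided (not in the listing) ... */

        bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
        mempool_free(p - bs->front_pad, &bs->bio_pool); /* back to element start */
}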
364 struct bio_set *bs = container_of(work, struct bio_set, rescue_work); in bio_alloc_rescue() local
368 spin_lock(&bs->rescue_lock); in bio_alloc_rescue()
369 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
370 spin_unlock(&bs->rescue_lock); in bio_alloc_rescue()
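The rescue worker can be reconstructed with reasonable confidence from the matched lines: it pops bios off rescue_list one at a time under the lock and resubmits each outside it, so submission never runs with the spinlock held. The loop shape and the submit_bio_noacct() call are assumptions; only the container_of, lock, and pop lines appear above.

static void bio_alloc_rescue(struct work_struct *work)
{
        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
        struct bio *bio;

        while (1) {
                spin_lock(&bs->rescue_lock);
                bio = bio_list_pop(&bs->rescue_list);
                spin_unlock(&bs->rescue_lock);

                if (!bio)
                        break;
                submit_bio_noacct(bio);         /* assumed resubmission call */
        }
}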
379 static void punt_bios_to_rescuer(struct bio_set *bs) in punt_bios_to_rescuer() argument
384 if (WARN_ON_ONCE(!bs->rescue_workqueue)) in punt_bios_to_rescuer()
401 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
406 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
409 spin_lock(&bs->rescue_lock); in punt_bios_to_rescuer()
410 bio_list_merge(&bs->rescue_list, &punt); in punt_bios_to_rescuer()
411 spin_unlock(&bs->rescue_lock); in punt_bios_to_rescuer()
413 queue_work(bs->rescue_workqueue, &bs->rescue_work); in punt_bios_to_rescuer()
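Lines 401 and 406 are the heart of the rescuer: the bio lists stashed on the current task are filtered so that only bios allocated from this bio_set are punted to the workqueue; punting another set's bios could itself require an allocation from this set and deadlock. A condensed sketch (the two per-task lists are looped over here rather than handled one by one):

static void punt_bios_to_rescuer(struct bio_set *bs)
{
        struct bio_list punt, nopunt;
        struct bio *bio;
        int i;

        if (WARN_ON_ONCE(!bs->rescue_workqueue))
                return;

        bio_list_init(&punt);

        /* current->bio_list[] holds bios queued while submission is active */
        for (i = 0; i < 2; i++) {
                bio_list_init(&nopunt);
                while ((bio = bio_list_pop(&current->bio_list[i])))
                        bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
                current->bio_list[i] = nopunt;
        }

        spin_lock(&bs->rescue_lock);
        bio_list_merge(&bs->rescue_list, &punt);
        spin_unlock(&bs->rescue_lock);

        queue_work(bs->rescue_workqueue, &bs->rescue_work);
}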
434 struct bio_set *bs) in bio_alloc_percpu_cache() argument
439 cache = per_cpu_ptr(bs->cache, get_cpu()); in bio_alloc_percpu_cache()
454 bio->bi_pool = bs; in bio_alloc_percpu_cache()
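Only three lines of the per-cpu fast path are matched. The shape they imply: pin the CPU, look at this bio_set's cache, take a bio from its free list if one is there, and stamp bi_pool so the bio returns to the same set when freed. The free_list/nr fields, the bi_next linkage, and the cache-miss fallback are assumptions in this sketch:

static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
                unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask,
                struct bio_set *bs)
{
        struct bio_alloc_cache *cache;
        struct bio *bio;

        cache = per_cpu_ptr(bs->cache, get_cpu());
        if (!cache->free_list) {                /* assumed field */
                put_cpu();
                return NULL;                    /* assumed: caller takes slow path */
        }
        bio = cache->free_list;
        cache->free_list = bio->bi_next;        /* assumed free-list linkage */
        cache->nr--;                            /* assumed field */
        put_cpu();

        bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
        bio->bi_pool = bs;
        return bio;
}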
494 struct bio_set *bs) in bio_alloc_bioset() argument
501 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) in bio_alloc_bioset()
505 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { in bio_alloc_bioset()
507 gfp_mask, bs); in bio_alloc_bioset()
540 bs->rescue_workqueue) in bio_alloc_bioset()
543 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
545 punt_bios_to_rescuer(bs); in bio_alloc_bioset()
547 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
551 if (!mempool_is_saturated(&bs->bio_pool)) in bio_alloc_bioset()
554 bio = p + bs->front_pad; in bio_alloc_bioset()
558 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
560 punt_bios_to_rescuer(bs); in bio_alloc_bioset()
562 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
574 bio->bi_pool = bs; in bio_alloc_bioset()
578 mempool_free(p, &bs->bio_pool); in bio_alloc_bioset()
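The matched lines trace the whole slow path of bio_alloc_bioset(): try the per-cpu cache if eligible (line 505), allocate a pool element without direct reclaim when called under an active submission (line 540), punt and retry once on failure (lines 543-547), offset into the element by front_pad (line 554), then allocate an external bvec array the same way when the inline vectors do not suffice (lines 558-562). A condensed sketch of that control flow; the gating conditions are simplified from the original and the REQ_ALLOC_CACHE interpretation of line 551 is an assumption:

struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
                blk_opf_t opf, gfp_t gfp_mask, struct bio_set *bs)
{
        gfp_t saved_gfp = gfp_mask;
        struct bio_vec *bvl = NULL;
        struct bio *bio;
        void *p;

        /* a bio_set used with bvecs must have its bvec pool set up */
        if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
                return NULL;

        if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
                bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, gfp_mask, bs);
                if (bio)
                        return bio;
        }

        /*
         * Inside an active submission with a rescuer present we must not
         * block in the allocator: try without direct reclaim, punt our own
         * bios to the rescue workqueue, then retry with the full mask.
         */
        if (current->bio_list && bs->rescue_workqueue)
                gfp_mask &= ~__GFP_DIRECT_RECLAIM;

        p = mempool_alloc(&bs->bio_pool, gfp_mask);
        if (!p && gfp_mask != saved_gfp) {
                punt_bios_to_rescuer(bs);
                gfp_mask = saved_gfp;
                p = mempool_alloc(&bs->bio_pool, gfp_mask);
        }
        if (!p)
                return NULL;
        if (!mempool_is_saturated(&bs->bio_pool))
                opf &= ~REQ_ALLOC_CACHE;        /* assumed reading of line 551 */

        bio = p + bs->front_pad;                /* the bio lives past front_pad */
        if (nr_vecs > BIO_INLINE_VECS) {
                bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
                if (!bvl && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, saved_gfp);
                }
                if (!bvl)
                        goto err_free;
                bio_init(bio, bdev, bvl, nr_vecs, opf);
        } else {
                bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
        }

        bio->bi_pool = bs;
        return bio;

err_free:
        mempool_free(p, &bs->bio_pool);
        return NULL;
}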
731 struct bio_set *bs; in bio_cpu_dead() local
733 bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); in bio_cpu_dead()
734 if (bs->cache) { in bio_cpu_dead()
735 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); in bio_cpu_dead()
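The hotplug callback's matched lines show the cleanup contract: when a CPU goes offline, whatever its per-cpu cache still holds must be released, since that CPU will no longer run the fast path. Reconstructed, with the prune helper (which is not in the listing) as an assumed name:

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct bio_set *bs;

        bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
        if (bs->cache) {
                struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

                bio_alloc_cache_prune(cache, -1U);      /* assumed helper */
        }
        return 0;
}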
742 static void bio_alloc_cache_destroy(struct bio_set *bs) in bio_alloc_cache_destroy() argument
746 if (!bs->cache) in bio_alloc_cache_destroy()
749 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bio_alloc_cache_destroy()
753 cache = per_cpu_ptr(bs->cache, cpu); in bio_alloc_cache_destroy()
756 free_percpu(bs->cache); in bio_alloc_cache_destroy()
757 bs->cache = NULL; in bio_alloc_cache_destroy()
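Teardown mirrors the hotplug path: unregister the hotplug instance first so no callback races with the loop, drain every CPU's cache, then free the per-cpu allocation and clear the pointer so the exit path is idempotent. The loop between lines 749 and 753 is assumed to use for_each_possible_cpu() and the same prune helper:

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
        int cpu;

        if (!bs->cache)
                return;

        cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
        for_each_possible_cpu(cpu) {
                struct bio_alloc_cache *cache;

                cache = per_cpu_ptr(bs->cache, cpu);
                bio_alloc_cache_prune(cache, -1U);      /* assumed helper */
        }
        free_percpu(bs->cache);
        bs->cache = NULL;
}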
846 gfp_t gfp, struct bio_set *bs) in bio_alloc_clone() argument
850 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); in bio_alloc_clone()
1622 gfp_t gfp, struct bio_set *bs) in bio_split() argument
1633 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); in bio_split()
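bio_alloc_clone() (line 850) and bio_split() (line 1633) are where a stacking driver's private bio_set pays off: clones and splits draw on the driver's own reserves instead of a shared pool, and bio_split() itself clones through bio_alloc_clone() as shown above. A hypothetical caller-side sketch in the style of md/dm; my_bio_set and my_trim_bio() are illustrative, and bio_split()'s failure convention varies by kernel version, so error handling is omitted here:

#include <linux/bio.h>

/* Trim @bio down to @sectors, sending the remainder back through the
 * stack; the returned bio is at most @sectors long. */
static struct bio *my_trim_bio(struct bio *bio, unsigned int sectors,
                               struct bio_set *my_bio_set)
{
        if (sectors < bio_sectors(bio)) {
                struct bio *split;

                split = bio_split(bio, sectors, GFP_NOIO, my_bio_set);
                bio_chain(split, bio);  /* parent completes after the split */
                submit_bio_noacct(bio); /* remainder re-enters the stack */
                bio = split;
        }
        return bio;                     /* handle this piece locally */
}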
1695 void bioset_exit(struct bio_set *bs) in bioset_exit() argument
1697 bio_alloc_cache_destroy(bs); in bioset_exit()
1698 if (bs->rescue_workqueue) in bioset_exit()
1699 destroy_workqueue(bs->rescue_workqueue); in bioset_exit()
1700 bs->rescue_workqueue = NULL; in bioset_exit()
1702 mempool_exit(&bs->bio_pool); in bioset_exit()
1703 mempool_exit(&bs->bvec_pool); in bioset_exit()
1705 bioset_integrity_free(bs); in bioset_exit()
1706 if (bs->bio_slab) in bioset_exit()
1707 bio_put_slab(bs); in bioset_exit()
1708 bs->bio_slab = NULL; in bioset_exit()
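bioset_exit() appears almost in full above (lines 1697-1708), so it can be reassembled directly. The order matters: the per-cpu cache and the rescuer, which may still hold bios, go first; the mempools next; the shared slab reference last. Every step checks before destroying, so calling it on a zeroed or already-exited set is safe.

void bioset_exit(struct bio_set *bs)
{
        bio_alloc_cache_destroy(bs);            /* per-cpu cache + hotplug hook */
        if (bs->rescue_workqueue)
                destroy_workqueue(bs->rescue_workqueue);
        bs->rescue_workqueue = NULL;

        mempool_exit(&bs->bio_pool);
        mempool_exit(&bs->bvec_pool);

        bioset_integrity_free(bs);
        if (bs->bio_slab)
                bio_put_slab(bs);               /* drop shared slab reference */
        bs->bio_slab = NULL;
}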
1733 int bioset_init(struct bio_set *bs, in bioset_init() argument
1738 bs->front_pad = front_pad; in bioset_init()
1740 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); in bioset_init()
1742 bs->back_pad = 0; in bioset_init()
1744 spin_lock_init(&bs->rescue_lock); in bioset_init()
1745 bio_list_init(&bs->rescue_list); in bioset_init()
1746 INIT_WORK(&bs->rescue_work, bio_alloc_rescue); in bioset_init()
1748 bs->bio_slab = bio_find_or_create_slab(bs); in bioset_init()
1749 if (!bs->bio_slab) in bioset_init()
1752 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) in bioset_init()
1756 biovec_init_pool(&bs->bvec_pool, pool_size)) in bioset_init()
1760 bs->rescue_workqueue = alloc_workqueue("bioset", in bioset_init()
1762 if (!bs->rescue_workqueue) in bioset_init()
1766 bs->cache = alloc_percpu(struct bio_alloc_cache); in bioset_init()
1767 if (!bs->cache) in bioset_init()
1769 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bioset_init()
1774 bioset_exit(bs); in bioset_init()
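From the caller's side, bioset_init() is how a driver obtains the private allocation context used throughout this file: front_pad reserves per-bio driver data in front of each struct bio (recovered in bio_free() via p - bs->front_pad), and the flags opt into the bvec pool, the rescuer workqueue, and the per-cpu cache seen above. A hypothetical driver-side sketch; all my_* names and the pool size are illustrative:

#include <linux/bio.h>

struct my_per_bio_data {
        void *owner;            /* lives in the front_pad area of each bio */
};

static struct bio_set my_bio_set;

static int my_driver_init(void)
{
        /* pool size of 4 is an illustrative reserve, not a recommendation */
        return bioset_init(&my_bio_set, 4, sizeof(struct my_per_bio_data),
                           BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
}

/* per-bio data sits front_pad bytes before the bio itself */
static struct my_per_bio_data *my_pbd(struct bio *bio)
{
        return (void *)bio - sizeof(struct my_per_bio_data);
}

static void my_driver_exit(void)
{
        bioset_exit(&my_bio_set);
}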