Lines matching refs:bs — uses of the struct bio_set pointer `bs` in block/bio.c

108 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)  in bs_bio_slab_size()  argument
110 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
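Together, lines 108–110 fix the slab object layout for a bio_set: front_pad bytes of caller-private space, then the struct bio itself, then back_pad bytes for the inline bio_vecs. The front_pad region is what lets stacking drivers piggyback per-bio state on each allocation. A minimal sketch of that idiom, assuming a hypothetical struct my_priv and a bio_set initialized with front_pad == sizeof(struct my_priv):

    #include <linux/bio.h>

    /* hypothetical per-bio driver state, carved out of front_pad */
    struct my_priv {
        unsigned long tag;
    };

    static struct my_priv *my_priv_of(struct bio *bio)
    {
        /*
         * The bio sits front_pad bytes into the slab object, so the
         * private area lies immediately in front of it. This only
         * works because front_pad == sizeof(struct my_priv) here.
         */
        return (struct my_priv *)bio - 1;
    }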
113 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) in bio_find_or_create_slab() argument
115 unsigned int size = bs_bio_slab_size(bs); in bio_find_or_create_slab()
131 static void bio_put_slab(struct bio_set *bs) in bio_put_slab() argument
134 unsigned int slab_size = bs_bio_slab_size(bs); in bio_put_slab()
142 WARN_ON_ONCE(bslab->slab != bs->bio_slab); in bio_put_slab()
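bio_find_or_create_slab() and bio_put_slab() maintain a refcounted table of kmem_caches keyed by the padded size, so every bio_set with identical padding shares one slab. A simplified sketch of the lookup-or-create pattern (the entry layout is abridged from bio.c; the helper name is hypothetical, and the mutex that serializes the real paths is elided):

    #include <linux/slab.h>
    #include <linux/xarray.h>

    struct bio_slab {                       /* abridged from bio.c */
        struct kmem_cache *slab;
        unsigned int slab_ref;
        unsigned int slab_size;
    };

    static struct kmem_cache *my_find_or_create(struct xarray *bio_slabs,
                                                unsigned int size)
    {
        struct bio_slab *bslab = xa_load(bio_slabs, size);

        if (bslab) {
            bslab->slab_ref++;              /* share the existing cache */
            return bslab->slab;
        }
        /* slow path (elided): kmem_cache_create() a new slab and
         * xa_store() it under `size` with slab_ref = 1 */
        return NULL;
    }

bio_put_slab() is the inverse: it drops slab_ref and destroys the kmem_cache once the last bio_set of that size is gone, after the WARN_ON_ONCE at line 142 confirms the set really owns that slab.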
226 struct bio_set *bs = bio->bi_pool; in bio_free() local
229 WARN_ON_ONCE(!bs); in bio_free()
232 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
233 mempool_free(p - bs->front_pad, &bs->bio_pool); in bio_free()
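The `p - bs->front_pad` at line 233 is the mirror image of the allocation side (line 530 below): callers hold a pointer to the bio, not to the mempool element, so the free path has to step back over front_pad to recover the element base before returning it. The symmetry, as a two-step fragment:

    /* alloc side (bio_alloc_bioset) */
    void *p = mempool_alloc(&bs->bio_pool, gfp_mask);
    struct bio *bio = p + bs->front_pad;    /* hand out the bio */

    /* free side (bio_free) undoes the same offset */
    mempool_free((void *)bio - bs->front_pad, &bs->bio_pool);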
359 struct bio_set *bs = container_of(work, struct bio_set, rescue_work); in bio_alloc_rescue() local
363 spin_lock(&bs->rescue_lock); in bio_alloc_rescue()
364 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
365 spin_unlock(&bs->rescue_lock); in bio_alloc_rescue()
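Lines 363–365 are the core of the rescue worker. The surrounding loop, sketched here consistently with the calls shown, keeps popping until the list drains, resubmitting each bio from workqueue context where it can make forward progress independently of the blocked allocator:

    while (1) {
        struct bio *bio;

        spin_lock(&bs->rescue_lock);
        bio = bio_list_pop(&bs->rescue_list);
        spin_unlock(&bs->rescue_lock);

        if (!bio)
            break;

        submit_bio_noacct(bio);
    }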
374 static void punt_bios_to_rescuer(struct bio_set *bs) in punt_bios_to_rescuer() argument
379 if (WARN_ON_ONCE(!bs->rescue_workqueue)) in punt_bios_to_rescuer()
396 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
401 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
404 spin_lock(&bs->rescue_lock); in punt_bios_to_rescuer()
405 bio_list_merge(&bs->rescue_list, &punt); in punt_bios_to_rescuer()
406 spin_unlock(&bs->rescue_lock); in punt_bios_to_rescuer()
408 queue_work(bs->rescue_workqueue, &bs->rescue_work); in punt_bios_to_rescuer()
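The twin bio_list_add() calls at 396 and 401 classify the two halves of current->bio_list: bios owned by this bio_set go to `punt` (they may be pinning the very mempool elements the allocator is waiting for), everything else stays in `nopunt`. This is how bio_alloc_bioset() breaks the deadlock between a blocked mempool_alloc() and bios parked on the submitting task. A sketch of the classification for one of the two lists:

    struct bio_list punt, nopunt;
    struct bio *bio;

    bio_list_init(&punt);
    bio_list_init(&nopunt);

    while ((bio = bio_list_pop(&current->bio_list[0])))
        bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
    current->bio_list[0] = nopunt;

The punted list is then spliced onto bs->rescue_list under rescue_lock (404–406) and the rescue worker is kicked (408).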
413 struct bio_set *bs) in bio_alloc_percpu_cache() argument
418 cache = per_cpu_ptr(bs->cache, get_cpu()); in bio_alloc_percpu_cache()
429 bio->bi_pool = bs; in bio_alloc_percpu_cache()
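get_cpu() at line 418 pins the task to one CPU (preemption off) so the local cache can be popped without any lock; the recycled bio still records `bs` as its owner (429) so bio_free() can route it back correctly. A simplified sketch of the fast path, assuming the free_list/nr layout of struct bio_alloc_cache:

    struct bio_alloc_cache *cache;
    struct bio *bio;

    cache = per_cpu_ptr(bs->cache, get_cpu());
    bio = cache->free_list;
    if (bio) {
        cache->free_list = bio->bi_next;    /* pop the head */
        cache->nr--;
    }
    put_cpu();                              /* preemption back on */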
472 struct bio_set *bs) in bio_alloc_bioset() argument
479 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) in bio_alloc_bioset()
483 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { in bio_alloc_bioset()
485 gfp_mask, bs); in bio_alloc_bioset()
518 bs->rescue_workqueue) in bio_alloc_bioset()
521 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
523 punt_bios_to_rescuer(bs); in bio_alloc_bioset()
525 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
530 bio = p + bs->front_pad; in bio_alloc_bioset()
534 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
536 punt_bios_to_rescuer(bs); in bio_alloc_bioset()
538 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
550 bio->bi_pool = bs; in bio_alloc_bioset()
554 mempool_free(p, &bs->bio_pool); in bio_alloc_bioset()
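From a caller's point of view, all of the above — the per-CPU fast path (483), mempool allocation with punt-and-retry on failure (521–525), and the matching punt-and-retry for the bvec array (534–538) — hides behind a single call. A hypothetical usage sketch (my_alloc_write_bio and my_bs are illustration names; my_bs is a driver-owned bio_set initialized elsewhere):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static struct bio *my_alloc_write_bio(struct block_device *bdev,
                                          sector_t sector,
                                          struct bio_set *my_bs)
    {
        struct bio *bio;

        /* up to 4 segments; GFP_NOIO keeps reclaim from recursing
         * back into the I/O path we are servicing */
        bio = bio_alloc_bioset(bdev, 4, REQ_OP_WRITE, GFP_NOIO, my_bs);
        if (!bio)
            return NULL;

        bio->bi_iter.bi_sector = sector;
        return bio;
    }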
699 struct bio_set *bs; in bio_cpu_dead() local
701 bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); in bio_cpu_dead()
702 if (bs->cache) { in bio_cpu_dead()
703 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); in bio_cpu_dead()
710 static void bio_alloc_cache_destroy(struct bio_set *bs) in bio_alloc_cache_destroy() argument
714 if (!bs->cache) in bio_alloc_cache_destroy()
717 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bio_alloc_cache_destroy()
721 cache = per_cpu_ptr(bs->cache, cpu); in bio_alloc_cache_destroy()
724 free_percpu(bs->cache); in bio_alloc_cache_destroy()
725 bs->cache = NULL; in bio_alloc_cache_destroy()
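Teardown order matters here: the hotplug instance is removed first (717) so bio_cpu_dead() can no longer run against a set being destroyed, then every possible CPU's cache is drained before the percpu area is freed (724–725). A sketch of the drain loop elided between lines 717 and 724, assuming bio_alloc_cache_prune() as the per-cache helper:

    int cpu;

    for_each_possible_cpu(cpu) {
        struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

        bio_alloc_cache_prune(cache, -1U);  /* free every cached bio */
    }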
794 gfp_t gfp, struct bio_set *bs) in bio_alloc_clone() argument
798 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); in bio_alloc_clone()
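bio_alloc_clone() pulls a zero-vec bio from `bs` and shares the source's bvec pages, which is the standard stacking-driver pattern: clone, redirect to a lower device, and complete the original from the clone's endio. A hypothetical sketch (my_redirect, my_clone_endio, and my_bs are illustration names):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void my_clone_endio(struct bio *clone)
    {
        struct bio *orig = clone->bi_private;

        orig->bi_status = clone->bi_status; /* propagate the result */
        bio_put(clone);
        bio_endio(orig);
    }

    static void my_redirect(struct bio *orig, struct block_device *lower,
                            struct bio_set *my_bs)
    {
        struct bio *clone = bio_alloc_clone(lower, orig, GFP_NOIO, my_bs);

        if (!clone) {
            bio_io_error(orig);
            return;
        }
        clone->bi_private = orig;
        clone->bi_end_io = my_clone_endio;
        submit_bio_noacct(clone);
    }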
1575 gfp_t gfp, struct bio_set *bs) in bio_split() argument
1586 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); in bio_split()
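bio_split() clones the bio through the given set and trims both halves; the classic caller pattern chains the remainder behind the front half so the original only completes once both finish. A sketch, assuming max_sectors is some device limit and my_bs is a dedicated bio_set (splitting from a dedicated set avoids exhausting the pool the bio came from):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void my_submit_capped(struct bio *bio, unsigned int max_sectors,
                                 struct bio_set *my_bs)
    {
        if (max_sectors < bio_sectors(bio)) {
            struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, my_bs);

            if (!split) {
                bio_io_error(bio);
                return;
            }
            bio_chain(split, bio);      /* bio completes after split */
            submit_bio_noacct(bio);     /* requeue the remainder */
            bio = split;                /* continue with the front half */
        }
        submit_bio_noacct(bio);
    }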
1648 void bioset_exit(struct bio_set *bs) in bioset_exit() argument
1650 bio_alloc_cache_destroy(bs); in bioset_exit()
1651 if (bs->rescue_workqueue) in bioset_exit()
1652 destroy_workqueue(bs->rescue_workqueue); in bioset_exit()
1653 bs->rescue_workqueue = NULL; in bioset_exit()
1655 mempool_exit(&bs->bio_pool); in bioset_exit()
1656 mempool_exit(&bs->bvec_pool); in bioset_exit()
1658 bioset_integrity_free(bs); in bioset_exit()
1659 if (bs->bio_slab) in bioset_exit()
1660 bio_put_slab(bs); in bioset_exit()
1661 bs->bio_slab = NULL; in bioset_exit()
1686 int bioset_init(struct bio_set *bs, in bioset_init() argument
1691 bs->front_pad = front_pad; in bioset_init()
1693 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); in bioset_init()
1695 bs->back_pad = 0; in bioset_init()
1697 spin_lock_init(&bs->rescue_lock); in bioset_init()
1698 bio_list_init(&bs->rescue_list); in bioset_init()
1699 INIT_WORK(&bs->rescue_work, bio_alloc_rescue); in bioset_init()
1701 bs->bio_slab = bio_find_or_create_slab(bs); in bioset_init()
1702 if (!bs->bio_slab) in bioset_init()
1705 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) in bioset_init()
1709 biovec_init_pool(&bs->bvec_pool, pool_size)) in bioset_init()
1713 bs->rescue_workqueue = alloc_workqueue("bioset", in bioset_init()
1715 if (!bs->rescue_workqueue) in bioset_init()
1719 bs->cache = alloc_percpu(struct bio_alloc_cache); in bioset_init()
1720 if (!bs->cache) in bioset_init()
1722 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bioset_init()
1727 bioset_exit(bs); in bioset_init()
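Initialization mirrors the exit path above: slab (1701), bio mempool (1705), optional bvec pool (1709), optional rescuer workqueue (1713), and optional per-CPU cache (1719), with the failure path (1727) reusing bioset_exit() to unwind whatever had succeeded. A hypothetical end-to-end usage sketch:

    #include <linux/bio.h>
    #include <linux/init.h>
    #include <linux/module.h>

    static struct bio_set my_bs;        /* illustration: one driver-wide set */

    static int __init my_driver_init(void)
    {
        /* 4 reserved bios, no front_pad; BIOSET_NEED_BVECS lets
         * bio_alloc_bioset() hand out external bio_vec arrays */
        return bioset_init(&my_bs, 4, 0, BIOSET_NEED_BVECS);
    }

    static void __exit my_driver_exit(void)
    {
        bioset_exit(&my_bs);            /* pools, slab, workqueue, cache */
    }

    module_init(my_driver_init);
    module_exit(my_driver_exit);
    MODULE_LICENSE("GPL");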