Lines Matching refs:bio (block/bio.c)

29 	struct bio		*free_list;
110 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
209 void bio_uninit(struct bio *bio) in bio_uninit() argument
212 if (bio->bi_blkg) { in bio_uninit()
213 blkg_put(bio->bi_blkg); in bio_uninit()
214 bio->bi_blkg = NULL; in bio_uninit()
217 if (bio_integrity(bio)) in bio_uninit()
218 bio_integrity_free(bio); in bio_uninit()
220 bio_crypt_free_ctx(bio); in bio_uninit()
224 static void bio_free(struct bio *bio) in bio_free() argument
226 struct bio_set *bs = bio->bi_pool; in bio_free()
227 void *p = bio; in bio_free()
231 bio_uninit(bio); in bio_free()
232 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
241 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, in bio_init() argument
244 bio->bi_next = NULL; in bio_init()
245 bio->bi_bdev = bdev; in bio_init()
246 bio->bi_opf = opf; in bio_init()
247 bio->bi_flags = 0; in bio_init()
248 bio->bi_ioprio = 0; in bio_init()
249 bio->bi_status = 0; in bio_init()
250 bio->bi_iter.bi_sector = 0; in bio_init()
251 bio->bi_iter.bi_size = 0; in bio_init()
252 bio->bi_iter.bi_idx = 0; in bio_init()
253 bio->bi_iter.bi_bvec_done = 0; in bio_init()
254 bio->bi_end_io = NULL; in bio_init()
255 bio->bi_private = NULL; in bio_init()
257 bio->bi_blkg = NULL; in bio_init()
258 bio->bi_issue.value = 0; in bio_init()
260 bio_associate_blkg(bio); in bio_init()
262 bio->bi_iocost_cost = 0; in bio_init()
266 bio->bi_crypt_context = NULL; in bio_init()
269 bio->bi_integrity = NULL; in bio_init()
271 bio->bi_vcnt = 0; in bio_init()
273 atomic_set(&bio->__bi_remaining, 1); in bio_init()
274 atomic_set(&bio->__bi_cnt, 1); in bio_init()
275 bio->bi_cookie = BLK_QC_T_NONE; in bio_init()
277 bio->bi_max_vecs = max_vecs; in bio_init()
278 bio->bi_io_vec = table; in bio_init()
279 bio->bi_pool = NULL; in bio_init()
295 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf) in bio_reset() argument
297 bio_uninit(bio); in bio_reset()
298 memset(bio, 0, BIO_RESET_BYTES); in bio_reset()
299 atomic_set(&bio->__bi_remaining, 1); in bio_reset()
300 bio->bi_bdev = bdev; in bio_reset()
301 if (bio->bi_bdev) in bio_reset()
302 bio_associate_blkg(bio); in bio_reset()
303 bio->bi_opf = opf; in bio_reset()
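
The bio_init()/bio_uninit() pair above supports fully caller-owned bios. A minimal sketch of that pattern, assuming the caller already holds bdev, sector and page: an on-stack bio with a single inline bio_vec, used for a small synchronous read.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: read one page synchronously via an on-stack bio.
 * bio_init() wires up the caller-provided vector table; bio_uninit()
 * releases anything the block layer attached during submission. */
static int example_read_page(struct block_device *bdev, sector_t sector,
                             struct page *page)
{
        struct bio bio;
        struct bio_vec bvec;
        int ret;

        bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = sector;
        __bio_add_page(&bio, page, PAGE_SIZE, 0);
        ret = submit_bio_wait(&bio);
        bio_uninit(&bio);
        return ret;
}
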
307 static struct bio *__bio_chain_endio(struct bio *bio) in __bio_chain_endio() argument
309 struct bio *parent = bio->bi_private; in __bio_chain_endio()
311 if (bio->bi_status && !parent->bi_status) in __bio_chain_endio()
312 parent->bi_status = bio->bi_status; in __bio_chain_endio()
313 bio_put(bio); in __bio_chain_endio()
317 static void bio_chain_endio(struct bio *bio) in bio_chain_endio() argument
319 bio_endio(__bio_chain_endio(bio)); in bio_chain_endio()
333 void bio_chain(struct bio *bio, struct bio *parent) in bio_chain() argument
335 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
337 bio->bi_private = parent; in bio_chain()
338 bio->bi_end_io = bio_chain_endio; in bio_chain()
343 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, in blk_next_bio() argument
346 struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp); in blk_next_bio()
348 if (bio) { in blk_next_bio()
349 bio_chain(bio, new); in blk_next_bio()
350 submit_bio(bio); in blk_next_bio()
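
bio_chain() and blk_next_bio() exist so a long operation can be issued as a chain of bios where each bio is chained to its successor and only the last one needs to be waited on. A sketch of that pattern, loosely following block/blk-lib.c and assuming the device reports a non-zero write-zeroes limit:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: zero a range as a chain of REQ_OP_WRITE_ZEROES bios. Each call
 * to blk_next_bio() chains the previous bio to the new one and submits
 * it, so waiting on the final bio waits for the whole chain. */
static int example_zero_range(struct block_device *bdev, sector_t sector,
                              sector_t nr_sects)
{
        sector_t max = bdev_write_zeroes_sectors(bdev);
        struct bio *bio = NULL;
        int ret = 0;

        if (!max)
                return -EOPNOTSUPP;

        while (nr_sects) {
                sector_t len = min_t(sector_t, nr_sects, max);

                bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES,
                                   GFP_KERNEL);
                bio->bi_iter.bi_sector = sector;
                bio->bi_iter.bi_size = len << 9;
                sector += len;
                nr_sects -= len;
        }
        if (bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        return ret;
}
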
360 struct bio *bio; in bio_alloc_rescue() local
364 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
367 if (!bio) in bio_alloc_rescue()
370 submit_bio_noacct(bio); in bio_alloc_rescue()
377 struct bio *bio; in punt_bios_to_rescuer() local
395 while ((bio = bio_list_pop(&current->bio_list[0]))) in punt_bios_to_rescuer()
396 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
400 while ((bio = bio_list_pop(&current->bio_list[1]))) in punt_bios_to_rescuer()
401 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
411 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev, in bio_alloc_percpu_cache()
416 struct bio *bio; in bio_alloc_percpu_cache() local
423 bio = cache->free_list; in bio_alloc_percpu_cache()
424 cache->free_list = bio->bi_next; in bio_alloc_percpu_cache()
428 bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf); in bio_alloc_percpu_cache()
429 bio->bi_pool = bs; in bio_alloc_percpu_cache()
430 return bio; in bio_alloc_percpu_cache()
470 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, in bio_alloc_bioset()
475 struct bio *bio; in bio_alloc_bioset() local
484 bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, in bio_alloc_bioset()
486 if (bio) in bio_alloc_bioset()
487 return bio; in bio_alloc_bioset()
530 bio = p + bs->front_pad; in bio_alloc_bioset()
543 bio_init(bio, bdev, bvl, nr_vecs, opf); in bio_alloc_bioset()
545 bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf); in bio_alloc_bioset()
547 bio_init(bio, bdev, NULL, 0, opf); in bio_alloc_bioset()
550 bio->bi_pool = bs; in bio_alloc_bioset()
551 return bio; in bio_alloc_bioset()
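
bio_alloc_bioset() draws from a caller-supplied bio_set, which is how drivers on the writeback path avoid depending on the shared fs_bio_set. A short sketch, with 'example_bio_set' and 'example_setup' as illustrative names:

#include <linux/bio.h>

/* Sketch: a driver-private, mempool-backed bio_set and an allocation
 * from it. GFP_NOIO plus the mempool guarantees forward progress. */
static struct bio_set example_bio_set;

static int example_setup(void)
{
        return bioset_init(&example_bio_set, BIO_POOL_SIZE, 0,
                           BIOSET_NEED_BVECS);
}

static struct bio *example_alloc(struct block_device *bdev,
                                 unsigned short nr_vecs)
{
        return bio_alloc_bioset(bdev, nr_vecs, REQ_OP_WRITE, GFP_NOIO,
                                &example_bio_set);
}
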
575 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) in bio_kmalloc()
577 struct bio *bio; in bio_kmalloc() local
581 return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); in bio_kmalloc()
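
As the fragment above shows, bio_kmalloc() only performs the kmalloc(); the caller is expected to run bio_init() on the result and later free it with bio_uninit() plus kfree() rather than bio_put(). A minimal sketch of that contract:

#include <linux/bio.h>

/* Sketch: allocate and initialise a caller-managed bio. Freeing is the
 * caller's job: bio_uninit(bio); kfree(bio); -- not bio_put(). */
static struct bio *example_kmalloc_bio(struct block_device *bdev,
                                       unsigned short nr_vecs)
{
        struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);

        if (!bio)
                return NULL;
        bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
        return bio;
}
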
585 void zero_fill_bio(struct bio *bio) in zero_fill_bio() argument
590 bio_for_each_segment(bv, bio, iter) in zero_fill_bio()
605 static void bio_truncate(struct bio *bio, unsigned new_size) in bio_truncate() argument
612 if (new_size >= bio->bi_iter.bi_size) in bio_truncate()
615 if (bio_op(bio) != REQ_OP_READ) in bio_truncate()
618 bio_for_each_segment(bv, bio, iter) { in bio_truncate()
642 bio->bi_iter.bi_size = new_size; in bio_truncate()
657 void guard_bio_eod(struct bio *bio) in guard_bio_eod() argument
659 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); in guard_bio_eod()
669 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
672 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod()
673 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) in guard_bio_eod()
676 bio_truncate(bio, maxsector << 9); in guard_bio_eod()
686 struct bio *bio; in bio_alloc_cache_prune() local
688 while ((bio = cache->free_list) != NULL) { in bio_alloc_cache_prune()
689 cache->free_list = bio->bi_next; in bio_alloc_cache_prune()
691 bio_free(bio); in bio_alloc_cache_prune()
736 void bio_put(struct bio *bio) in bio_put() argument
738 if (unlikely(bio_flagged(bio, BIO_REFFED))) { in bio_put()
739 BUG_ON(!atomic_read(&bio->__bi_cnt)); in bio_put()
740 if (!atomic_dec_and_test(&bio->__bi_cnt)) in bio_put()
744 if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) { in bio_put()
747 bio_uninit(bio); in bio_put()
748 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); in bio_put()
749 bio->bi_next = cache->free_list; in bio_put()
750 cache->free_list = bio; in bio_put()
755 bio_free(bio); in bio_put()
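
bio_put() drops the reference taken at allocation or via bio_get(); on the last put the bio is freed or returned to the per-cpu cache. A sketch of the classic get/put pairing, where 'example_wait_for_completion' is a hypothetical stand-in for whatever the caller's bi_end_io signals:

#include <linux/bio.h>

/* Sketch: hold an extra reference so the bio stays valid after its
 * completion handler has run and can still be inspected. */
static blk_status_t example_submit_and_check(struct bio *bio)
{
        blk_status_t status;

        bio_get(bio);
        submit_bio(bio);
        example_wait_for_completion(bio);       /* hypothetical helper */
        status = bio->bi_status;
        bio_put(bio);
        return status;
}
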
760 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) in __bio_clone() argument
762 bio_set_flag(bio, BIO_CLONED); in __bio_clone()
763 bio->bi_ioprio = bio_src->bi_ioprio; in __bio_clone()
764 bio->bi_iter = bio_src->bi_iter; in __bio_clone()
766 if (bio->bi_bdev) { in __bio_clone()
767 if (bio->bi_bdev == bio_src->bi_bdev && in __bio_clone()
769 bio_set_flag(bio, BIO_REMAPPED); in __bio_clone()
770 bio_clone_blkg_association(bio, bio_src); in __bio_clone()
773 if (bio_crypt_clone(bio, bio_src, gfp) < 0) in __bio_clone()
776 bio_integrity_clone(bio, bio_src, gfp) < 0) in __bio_clone()
793 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, in bio_alloc_clone()
796 struct bio *bio; in bio_alloc_clone() local
798 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); in bio_alloc_clone()
799 if (!bio) in bio_alloc_clone()
802 if (__bio_clone(bio, bio_src, gfp) < 0) { in bio_alloc_clone()
803 bio_put(bio); in bio_alloc_clone()
806 bio->bi_io_vec = bio_src->bi_io_vec; in bio_alloc_clone()
808 return bio; in bio_alloc_clone()
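
bio_alloc_clone() is the usual entry point for stacking drivers: the clone shares the source's bvec table and is retargeted at a lower device. A sketch of that use, reusing the example_bio_set from the earlier sketch; 'example_clone_endio' and 'example_remap' are illustrative names:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: complete the original bio once the clone finishes. */
static void example_clone_endio(struct bio *clone)
{
        struct bio *orig = clone->bi_private;

        if (clone->bi_status)
                orig->bi_status = clone->bi_status;
        bio_put(clone);
        bio_endio(orig);
}

/* Sketch: clone an incoming bio, shift it onto the lower device's
 * sector range, and resubmit it. */
static void example_remap(struct bio *bio, struct block_device *lower,
                          sector_t offset)
{
        struct bio *clone;

        clone = bio_alloc_clone(lower, bio, GFP_NOIO, &example_bio_set);
        if (!clone) {
                bio_io_error(bio);
                return;
        }
        clone->bi_iter.bi_sector += offset;
        clone->bi_end_io = example_clone_endio;
        clone->bi_private = bio;
        submit_bio_noacct(clone);
}
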
824 int bio_init_clone(struct block_device *bdev, struct bio *bio, in bio_init_clone() argument
825 struct bio *bio_src, gfp_t gfp) in bio_init_clone()
829 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); in bio_init_clone()
830 ret = __bio_clone(bio, bio_src, gfp); in bio_init_clone()
832 bio_uninit(bio); in bio_init_clone()
845 static inline bool bio_full(struct bio *bio, unsigned len) in bio_full() argument
847 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_full()
849 if (bio->bi_iter.bi_size > UINT_MAX - len) in bio_full()
891 static bool __bio_try_merge_page(struct bio *bio, struct page *page, in __bio_try_merge_page() argument
894 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in __bio_try_merge_page()
897 if (bio->bi_vcnt > 0) { in __bio_try_merge_page()
898 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in __bio_try_merge_page()
901 if (bio->bi_iter.bi_size > UINT_MAX - len) { in __bio_try_merge_page()
906 bio->bi_iter.bi_size += len; in __bio_try_merge_page()
918 static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio, in bio_try_merge_hw_seg() argument
922 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_try_merge_hw_seg()
931 return __bio_try_merge_page(bio, page, len, offset, same_page); in bio_try_merge_hw_seg()
947 int bio_add_hw_page(struct request_queue *q, struct bio *bio, in bio_add_hw_page() argument
953 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_add_hw_page()
956 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) in bio_add_hw_page()
959 if (bio->bi_vcnt > 0) { in bio_add_hw_page()
960 if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page)) in bio_add_hw_page()
967 bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_add_hw_page()
972 if (bio_full(bio, len)) in bio_add_hw_page()
975 if (bio->bi_vcnt >= queue_max_segments(q)) in bio_add_hw_page()
978 bvec = &bio->bi_io_vec[bio->bi_vcnt]; in bio_add_hw_page()
982 bio->bi_vcnt++; in bio_add_hw_page()
983 bio->bi_iter.bi_size += len; in bio_add_hw_page()
1002 int bio_add_pc_page(struct request_queue *q, struct bio *bio, in bio_add_pc_page() argument
1006 return bio_add_hw_page(q, bio, page, len, offset, in bio_add_pc_page()
1027 int bio_add_zone_append_page(struct bio *bio, struct page *page, in bio_add_zone_append_page() argument
1030 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_add_zone_append_page()
1033 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) in bio_add_zone_append_page()
1036 if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) in bio_add_zone_append_page()
1039 return bio_add_hw_page(q, bio, page, len, offset, in bio_add_zone_append_page()
1054 void __bio_add_page(struct bio *bio, struct page *page, in __bio_add_page() argument
1057 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt]; in __bio_add_page()
1059 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); in __bio_add_page()
1060 WARN_ON_ONCE(bio_full(bio, len)); in __bio_add_page()
1066 bio->bi_iter.bi_size += len; in __bio_add_page()
1067 bio->bi_vcnt++; in __bio_add_page()
1081 int bio_add_page(struct bio *bio, struct page *page, in bio_add_page() argument
1086 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) { in bio_add_page()
1087 if (bio_full(bio, len)) in bio_add_page()
1089 __bio_add_page(bio, page, len, offset); in bio_add_page()
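
bio_add_page() returns the number of bytes actually added, so anything short of the requested length means the bio is full (vector table exhausted or bi_size would overflow) and the caller needs to submit and start a new bio. A small sketch:

#include <linux/bio.h>

/* Sketch: add whole pages until the bio refuses one; returns how many
 * pages were added so the caller can submit and continue with the rest. */
static unsigned int example_add_pages(struct bio *bio, struct page **pages,
                                      unsigned int nr_pages)
{
        unsigned int i;

        for (i = 0; i < nr_pages; i++) {
                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
                        break;
        }
        return i;
}
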
1109 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, in bio_add_folio() argument
1114 return bio_add_page(bio, &folio->page, len, off) > 0; in bio_add_folio()
1117 void __bio_release_pages(struct bio *bio, bool mark_dirty) in __bio_release_pages() argument
1122 bio_for_each_segment_all(bvec, bio, iter_all) { in __bio_release_pages()
1130 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) in bio_iov_bvec_set() argument
1134 WARN_ON_ONCE(bio->bi_max_vecs); in bio_iov_bvec_set()
1136 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { in bio_iov_bvec_set()
1137 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_iov_bvec_set()
1143 bio->bi_vcnt = iter->nr_segs; in bio_iov_bvec_set()
1144 bio->bi_io_vec = (struct bio_vec *)iter->bvec; in bio_iov_bvec_set()
1145 bio->bi_iter.bi_bvec_done = iter->iov_offset; in bio_iov_bvec_set()
1146 bio->bi_iter.bi_size = size; in bio_iov_bvec_set()
1147 bio_set_flag(bio, BIO_NO_PAGE_REF); in bio_iov_bvec_set()
1148 bio_set_flag(bio, BIO_CLONED); in bio_iov_bvec_set()
1151 static int bio_iov_add_page(struct bio *bio, struct page *page, in bio_iov_add_page() argument
1156 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) { in bio_iov_add_page()
1157 __bio_add_page(bio, page, len, offset); in bio_iov_add_page()
1166 static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page, in bio_iov_add_zone_append_page() argument
1169 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_iov_add_zone_append_page()
1172 if (bio_add_hw_page(q, bio, page, len, offset, in bio_iov_add_zone_append_page()
1192 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_iter_get_pages() argument
1194 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1195 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1196 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_iter_get_pages()
1218 size = iov_iter_get_pages2(iter, pages, UINT_MAX - bio->bi_iter.bi_size, in __bio_iov_iter_get_pages()
1225 trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1); in __bio_iov_iter_get_pages()
1238 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { in __bio_iov_iter_get_pages()
1239 ret = bio_iov_add_zone_append_page(bio, page, len, in __bio_iov_iter_get_pages()
1244 bio_iov_add_page(bio, page, len, offset); in __bio_iov_iter_get_pages()
1277 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in bio_iov_iter_get_pages() argument
1282 bio_iov_bvec_set(bio, iter); in bio_iov_iter_get_pages()
1283 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_iov_iter_get_pages()
1288 ret = __bio_iov_iter_get_pages(bio, iter); in bio_iov_iter_get_pages()
1289 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); in bio_iov_iter_get_pages()
1291 return bio->bi_vcnt ? 0 : ret; in bio_iov_iter_get_pages()
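
bio_iov_iter_get_pages() is the direct-I/O building block: it pins user pages described by an iov_iter into the bio (or borrows the bvec array outright for bvec-backed iters), and the pages are later dropped by bio_release_pages() at completion. A minimal sketch, with the endio wiring left to the caller:

#include <linux/bio.h>
#include <linux/uio.h>

/* Sketch: map as much of the iterator as fits into this bio and submit
 * it; a non-zero return means no segment could be added. */
static int example_map_and_submit(struct bio *bio, struct iov_iter *iter)
{
        int ret = bio_iov_iter_get_pages(bio, iter);

        if (ret)
                return ret;
        submit_bio(bio);
        return 0;
}
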
1295 static void submit_bio_wait_endio(struct bio *bio) in submit_bio_wait_endio() argument
1297 complete(bio->bi_private); in submit_bio_wait_endio()
1311 int submit_bio_wait(struct bio *bio) in submit_bio_wait() argument
1314 bio->bi_bdev->bd_disk->lockdep_map); in submit_bio_wait()
1317 bio->bi_private = &done; in submit_bio_wait()
1318 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
1319 bio->bi_opf |= REQ_SYNC; in submit_bio_wait()
1320 submit_bio(bio); in submit_bio_wait()
1331 return blk_status_to_errno(bio->bi_status); in submit_bio_wait()
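
submit_bio_wait() turns any bio into synchronous I/O by parking the caller on a completion. A sketch of a common use, issuing a synchronous cache flush (this mirrors what blkdev_issue_flush() does):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: a data-less REQ_PREFLUSH write drains the device's volatile
 * write cache; submit_bio_wait() returns the error as an errno. */
static int example_flush(struct block_device *bdev)
{
        struct bio *bio = bio_alloc(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH,
                                    GFP_KERNEL);
        int ret;

        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}
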
1335 void __bio_advance(struct bio *bio, unsigned bytes) in __bio_advance() argument
1337 if (bio_integrity(bio)) in __bio_advance()
1338 bio_integrity_advance(bio, bytes); in __bio_advance()
1340 bio_crypt_advance(bio, bytes); in __bio_advance()
1341 bio_advance_iter(bio, &bio->bi_iter, bytes); in __bio_advance()
1345 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, in bio_copy_data_iter()
1346 struct bio *src, struct bvec_iter *src_iter) in bio_copy_data_iter()
1374 void bio_copy_data(struct bio *dst, struct bio *src) in bio_copy_data()
1383 void bio_free_pages(struct bio *bio) in bio_free_pages() argument
1388 bio_for_each_segment_all(bvec, bio, iter_all) in bio_free_pages()
1422 void bio_set_pages_dirty(struct bio *bio) in bio_set_pages_dirty() argument
1427 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_set_pages_dirty()
1448 static struct bio *bio_dirty_list;
1455 struct bio *bio, *next; in bio_dirty_fn() local
1462 while ((bio = next) != NULL) { in bio_dirty_fn()
1463 next = bio->bi_private; in bio_dirty_fn()
1465 bio_release_pages(bio, true); in bio_dirty_fn()
1466 bio_put(bio); in bio_dirty_fn()
1470 void bio_check_pages_dirty(struct bio *bio) in bio_check_pages_dirty() argument
1476 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_check_pages_dirty()
1481 bio_release_pages(bio, false); in bio_check_pages_dirty()
1482 bio_put(bio); in bio_check_pages_dirty()
1486 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
1487 bio_dirty_list = bio; in bio_check_pages_dirty()
1492 static inline bool bio_remaining_done(struct bio *bio) in bio_remaining_done() argument
1498 if (!bio_flagged(bio, BIO_CHAIN)) in bio_remaining_done()
1501 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); in bio_remaining_done()
1503 if (atomic_dec_and_test(&bio->__bi_remaining)) { in bio_remaining_done()
1504 bio_clear_flag(bio, BIO_CHAIN); in bio_remaining_done()
1524 void bio_endio(struct bio *bio) in bio_endio() argument
1527 if (!bio_remaining_done(bio)) in bio_endio()
1529 if (!bio_integrity_endio(bio)) in bio_endio()
1532 rq_qos_done_bio(bio); in bio_endio()
1534 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { in bio_endio()
1535 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); in bio_endio()
1536 bio_clear_flag(bio, BIO_TRACE_COMPLETION); in bio_endio()
1547 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1548 bio = __bio_chain_endio(bio); in bio_endio()
1552 blk_throtl_bio_endio(bio); in bio_endio()
1554 bio_uninit(bio); in bio_endio()
1555 if (bio->bi_end_io) in bio_endio()
1556 bio->bi_end_io(bio); in bio_endio()
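
bio_endio() is the single completion entry point: it handles chained parents (BIO_CHAIN), integrity verification and cgroup accounting before invoking the owner's bi_end_io. A sketch of a driver-side completion path; 'example_complete' is an illustrative name:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: record the result in bi_status and hand the bio back to the
 * block layer for completion. */
static void example_complete(struct bio *bio, int error)
{
        if (error)
                bio->bi_status = errno_to_blk_status(error);
        bio_endio(bio);
}
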
1574 struct bio *bio_split(struct bio *bio, int sectors, in bio_split() argument
1577 struct bio *split; in bio_split()
1580 BUG_ON(sectors >= bio_sectors(bio)); in bio_split()
1583 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) in bio_split()
1586 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); in bio_split()
1595 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
1597 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) in bio_split()
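
bio_split() is normally used with bio_chain() in the split-and-requeue pattern (as in md/raid): peel off the front of an oversized bio, chain it to the remainder so the original only completes after both halves, resubmit the remainder, and continue with the front. A sketch, again assuming a driver-owned example_bio_set:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: cap a bio at 'sectors' and requeue whatever is left over. */
static struct bio *example_limit_bio(struct bio *bio, unsigned int sectors)
{
        if (sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, sectors, GFP_NOIO,
                                              &example_bio_set);

                bio_chain(split, bio);
                submit_bio_noacct(bio);         /* remainder goes back */
                bio = split;                    /* handle the front now */
        }
        return bio;
}
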
1613 void bio_trim(struct bio *bio, sector_t offset, sector_t size) in bio_trim() argument
1616 offset + size > bio_sectors(bio))) in bio_trim()
1620 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1623 bio_advance(bio, offset << 9); in bio_trim()
1624 bio->bi_iter.bi_size = size; in bio_trim()
1626 if (bio_integrity(bio)) in bio_trim()
1627 bio_integrity_trim(bio); in bio_trim()