Lines matching refs: bio (block/bio.c)

64 unsigned int sz = sizeof(struct bio) + extra_size; in bio_find_or_create_slab()
233 void bio_uninit(struct bio *bio) in bio_uninit() argument
235 bio_disassociate_blkg(bio); in bio_uninit()
239 static void bio_free(struct bio *bio) in bio_free() argument
241 struct bio_set *bs = bio->bi_pool; in bio_free()
244 bio_uninit(bio); in bio_free()
247 bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio)); in bio_free()
252 p = bio; in bio_free()
258 kfree(bio); in bio_free()
267 void bio_init(struct bio *bio, struct bio_vec *table, in bio_init() argument
270 memset(bio, 0, sizeof(*bio)); in bio_init()
271 atomic_set(&bio->__bi_remaining, 1); in bio_init()
272 atomic_set(&bio->__bi_cnt, 1); in bio_init()
274 bio->bi_io_vec = table; in bio_init()
275 bio->bi_max_vecs = max_vecs; in bio_init()
289 void bio_reset(struct bio *bio) in bio_reset() argument
291 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); in bio_reset()
293 bio_uninit(bio); in bio_reset()
295 memset(bio, 0, BIO_RESET_BYTES); in bio_reset()
296 bio->bi_flags = flags; in bio_reset()
297 atomic_set(&bio->__bi_remaining, 1); in bio_reset()
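
bio_init() and bio_reset() together enable allocation-free, reusable bios: init wires an inline bio_vec table into (for example) a stack bio, and reset clears everything below BIO_RESET_BYTES while preserving bi_io_vec and bi_max_vecs. A minimal hedged sketch of that idiom follows; read_modify_write() and its parameters are hypothetical names, and error handling is pared down.

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical: synchronous read-modify-write of one page using a
	 * single on-stack bio, reused across the two submissions. */
	static int read_modify_write(struct block_device *bdev, sector_t sector,
				     struct page *page)
	{
		struct bio bio;
		struct bio_vec bvec;
		int ret;

		bio_init(&bio, &bvec, 1);	/* stack bio, one inline vec */
		bio_set_dev(&bio, bdev);
		bio.bi_iter.bi_sector = sector;
		bio.bi_opf = REQ_OP_READ;
		bio_add_page(&bio, page, PAGE_SIZE, 0);
		ret = submit_bio_wait(&bio);
		if (ret)
			goto out;

		/* ... modify @page here (elided) ... */

		bio_reset(&bio);		/* bi_io_vec/bi_max_vecs survive */
		bio_set_dev(&bio, bdev);	/* everything else was cleared */
		bio.bi_iter.bi_sector = sector;
		bio.bi_opf = REQ_OP_WRITE;
		bio_add_page(&bio, page, PAGE_SIZE, 0);
		ret = submit_bio_wait(&bio);
	out:
		bio_uninit(&bio);	/* drop the blkg ref bio_set_dev took */
		return ret;
	}
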
301 static struct bio *__bio_chain_endio(struct bio *bio) in __bio_chain_endio() argument
303 struct bio *parent = bio->bi_private; in __bio_chain_endio()
306 parent->bi_status = bio->bi_status; in __bio_chain_endio()
307 bio_put(bio); in __bio_chain_endio()
311 static void bio_chain_endio(struct bio *bio) in bio_chain_endio() argument
313 bio_endio(__bio_chain_endio(bio)); in bio_chain_endio()
327 void bio_chain(struct bio *bio, struct bio *parent) in bio_chain() argument
329 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
331 bio->bi_private = parent; in bio_chain()
332 bio->bi_end_io = bio_chain_endio; in bio_chain()
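
bio_chain() makes its first argument a child of the second: the parent's bi_end_io is deferred until every chained child has completed, with errors propagated through bi_status. One common shape is the blk-lib style "chain a series, wait only on the last" loop; below is a hedged sketch of that helper, where next_chained_bio() is a hypothetical name modeled on blk-lib's internal pattern.

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical: return a fresh bio, first chaining and submitting the
	 * previous one so that only the final bio needs to be waited on.
	 * Assumes a sleeping gfp mask, under which bio_alloc() cannot fail. */
	static struct bio *next_chained_bio(struct bio *prev,
					    unsigned int nr_pages, gfp_t gfp)
	{
		struct bio *new = bio_alloc(gfp, nr_pages);

		if (prev) {
			bio_chain(prev, new);	/* prev completes into new */
			submit_bio(prev);
		}
		return new;
	}
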
340 struct bio *bio; in bio_alloc_rescue() local
344 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
347 if (!bio) in bio_alloc_rescue()
350 generic_make_request(bio); in bio_alloc_rescue()
357 struct bio *bio; in punt_bios_to_rescuer() local
375 while ((bio = bio_list_pop(&current->bio_list[0]))) in punt_bios_to_rescuer()
376 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
380 while ((bio = bio_list_pop(&current->bio_list[1]))) in punt_bios_to_rescuer()
381 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
426 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs, in bio_alloc_bioset()
433 struct bio *bio; in bio_alloc_bioset() local
440 p = kmalloc(sizeof(struct bio) + in bio_alloc_bioset()
491 bio = p + front_pad; in bio_alloc_bioset()
492 bio_init(bio, NULL, 0); in bio_alloc_bioset()
507 bio->bi_flags |= idx << BVEC_POOL_OFFSET; in bio_alloc_bioset()
509 bvl = bio->bi_inline_vecs; in bio_alloc_bioset()
512 bio->bi_pool = bs; in bio_alloc_bioset()
513 bio->bi_max_vecs = nr_iovecs; in bio_alloc_bioset()
514 bio->bi_io_vec = bvl; in bio_alloc_bioset()
515 return bio; in bio_alloc_bioset()
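
bio_alloc_bioset() is the mempool-backed allocator: with a gfp_mask that can sleep, allocation from a bio_set is guaranteed to eventually succeed, and the punt-to-rescuer logic above keeps that guarantee deadlock-free for stacked devices. A hedged sketch of setting up and drawing from a private pool; my_bio_set and the two helpers are hypothetical names.

	#include <linux/bio.h>

	static struct bio_set my_bio_set;	/* hypothetical driver pool */

	static int my_pool_init(void)
	{
		/* pool of BIO_POOL_SIZE bios, no front padding, plus a
		 * bvec pool so nr_iovecs > 0 allocations work */
		return bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
				   BIOSET_NEED_BVECS);
	}

	static struct bio *my_alloc_bio(unsigned int nr_vecs)
	{
		/* GFP_NOIO can sleep, so this cannot return NULL */
		return bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
	}
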
523 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) in zero_fill_bio_iter() argument
529 __bio_for_each_segment(bv, bio, iter, start) { in zero_fill_bio_iter()
546 void bio_put(struct bio *bio) in bio_put() argument
548 if (!bio_flagged(bio, BIO_REFFED)) in bio_put()
549 bio_free(bio); in bio_put()
551 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt)); in bio_put()
556 if (atomic_dec_and_test(&bio->__bi_cnt)) in bio_put()
557 bio_free(bio); in bio_put()
573 void __bio_clone_fast(struct bio *bio, struct bio *bio_src) in __bio_clone_fast() argument
575 BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio)); in __bio_clone_fast()
581 bio->bi_disk = bio_src->bi_disk; in __bio_clone_fast()
582 bio->bi_partno = bio_src->bi_partno; in __bio_clone_fast()
583 bio_set_flag(bio, BIO_CLONED); in __bio_clone_fast()
585 bio_set_flag(bio, BIO_THROTTLED); in __bio_clone_fast()
586 bio->bi_opf = bio_src->bi_opf; in __bio_clone_fast()
587 bio->bi_ioprio = bio_src->bi_ioprio; in __bio_clone_fast()
588 bio->bi_write_hint = bio_src->bi_write_hint; in __bio_clone_fast()
589 bio->bi_iter = bio_src->bi_iter; in __bio_clone_fast()
590 bio->bi_io_vec = bio_src->bi_io_vec; in __bio_clone_fast()
592 bio_clone_blkg_association(bio, bio_src); in __bio_clone_fast()
593 blkcg_bio_issue_init(bio); in __bio_clone_fast()
605 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) in bio_clone_fast() argument
607 struct bio *b; in bio_clone_fast()
613 __bio_clone_fast(b, bio); in bio_clone_fast()
615 if (bio_integrity(bio)) { in bio_clone_fast()
618 ret = bio_integrity_clone(b, bio, gfp_mask); in bio_clone_fast()
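
__bio_clone_fast() shares the source's bi_io_vec instead of copying it (hence BIO_CLONED and the BVEC_POOL_IDX assertion), so clones are cheap but must never modify the vector. The usual consumer is a stacking driver that points a clone at a backing device and completes the original from the clone's endio; a hedged sketch with hypothetical names (clone_endio, remap_and_submit, @backing, @bs):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical stacking-driver remap: the clone shares the source's
	 * pages, so only the destination and completion differ. */
	static void clone_endio(struct bio *clone)
	{
		struct bio *orig = clone->bi_private;

		orig->bi_status = clone->bi_status;	/* propagate result */
		bio_put(clone);
		bio_endio(orig);
	}

	static void remap_and_submit(struct bio *orig,
				     struct block_device *backing,
				     struct bio_set *bs)
	{
		/* GFP_NOIO + bio_set: cannot fail */
		struct bio *clone = bio_clone_fast(orig, GFP_NOIO, bs);

		bio_set_dev(clone, backing);
		clone->bi_end_io = clone_endio;
		clone->bi_private = orig;
		generic_make_request(clone);
	}
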
649 static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio, in bio_try_merge_pc_page() argument
653 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_try_merge_pc_page()
662 return __bio_try_merge_page(bio, page, len, offset, same_page); in bio_try_merge_pc_page()
681 static int __bio_add_pc_page(struct request_queue *q, struct bio *bio, in __bio_add_pc_page() argument
690 if (unlikely(bio_flagged(bio, BIO_CLONED))) in __bio_add_pc_page()
693 if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q)) in __bio_add_pc_page()
696 if (bio->bi_vcnt > 0) { in __bio_add_pc_page()
697 if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page)) in __bio_add_pc_page()
704 bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; in __bio_add_pc_page()
709 if (bio_full(bio, len)) in __bio_add_pc_page()
712 if (bio->bi_vcnt >= queue_max_segments(q)) in __bio_add_pc_page()
715 bvec = &bio->bi_io_vec[bio->bi_vcnt]; in __bio_add_pc_page()
719 bio->bi_vcnt++; in __bio_add_pc_page()
720 bio->bi_iter.bi_size += len; in __bio_add_pc_page()
724 int bio_add_pc_page(struct request_queue *q, struct bio *bio, in bio_add_pc_page() argument
728 return __bio_add_pc_page(q, bio, page, len, offset, &same_page); in bio_add_pc_page()
748 bool __bio_try_merge_page(struct bio *bio, struct page *page, in __bio_try_merge_page() argument
751 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in __bio_try_merge_page()
754 if (bio->bi_vcnt > 0 && !bio_full(bio, len)) { in __bio_try_merge_page()
755 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in __bio_try_merge_page()
759 bio->bi_iter.bi_size += len; in __bio_try_merge_page()
777 void __bio_add_page(struct bio *bio, struct page *page, in __bio_add_page() argument
780 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt]; in __bio_add_page()
782 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); in __bio_add_page()
783 WARN_ON_ONCE(bio_full(bio, len)); in __bio_add_page()
789 bio->bi_iter.bi_size += len; in __bio_add_page()
790 bio->bi_vcnt++; in __bio_add_page()
792 if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page))) in __bio_add_page()
793 bio_set_flag(bio, BIO_WORKINGSET); in __bio_add_page()
807 int bio_add_page(struct bio *bio, struct page *page, in bio_add_page() argument
812 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) { in bio_add_page()
813 if (bio_full(bio, len)) in bio_add_page()
815 __bio_add_page(bio, page, len, offset); in bio_add_page()
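
bio_add_page() is the general-purpose way to fill a bio: it first tries to merge into the last bio_vec (physically contiguous pages become one segment) and only then consumes a new slot, returning the length actually added. A hedged sketch of the usual fill loop; fill_bio_from_pages() is a hypothetical name.

	#include <linux/bio.h>

	/* Hypothetical: append whole pages until the bio is full.
	 * Returns the number of pages actually added. */
	static int fill_bio_from_pages(struct bio *bio, struct page **pages,
				       int nr_pages)
	{
		int i;

		for (i = 0; i < nr_pages; i++) {
			/* a short return means the bio ran out of room */
			if (bio_add_page(bio, pages[i], PAGE_SIZE, 0)
			    != PAGE_SIZE)
				break;
		}
		return i;
	}
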
821 void bio_release_pages(struct bio *bio, bool mark_dirty) in bio_release_pages() argument
826 if (bio_flagged(bio, BIO_NO_PAGE_REF)) in bio_release_pages()
829 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_release_pages()
836 static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_bvec_add_pages() argument
846 size = bio_add_page(bio, bv->bv_page, len, in __bio_iov_bvec_add_pages()
866 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_iter_get_pages() argument
868 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
869 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
870 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_iter_get_pages()
894 if (__bio_try_merge_page(bio, page, len, offset, &same_page)) { in __bio_iov_iter_get_pages()
898 if (WARN_ON_ONCE(bio_full(bio, len))) in __bio_iov_iter_get_pages()
900 __bio_add_page(bio, page, len, offset); in __bio_iov_iter_get_pages()
929 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in bio_iov_iter_get_pages() argument
934 if (WARN_ON_ONCE(bio->bi_vcnt)) in bio_iov_iter_get_pages()
939 ret = __bio_iov_bvec_add_pages(bio, iter); in bio_iov_iter_get_pages()
941 ret = __bio_iov_iter_get_pages(bio, iter); in bio_iov_iter_get_pages()
942 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); in bio_iov_iter_get_pages()
945 bio_set_flag(bio, BIO_NO_PAGE_REF); in bio_iov_iter_get_pages()
946 return bio->bi_vcnt ? 0 : ret; in bio_iov_iter_get_pages()
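
bio_iov_iter_get_pages() pins user pages from an iov_iter into the bio until the iterator drains or the bio fills; for ITER_BVEC iterators the pages are reused without extra references, which is what BIO_NO_PAGE_REF records for bio_release_pages(). A hedged sketch of a direct-I/O submission loop built on it; submit_dio() and @end_io are hypothetical, and error propagation between pieces is elided.

	#include <linux/bio.h>
	#include <linux/uio.h>

	/* Hypothetical: map @iter a bio at a time and submit each piece.
	 * @end_io must drop the page pins, e.g. via bio_release_pages(). */
	static void submit_dio(struct block_device *bdev, sector_t sector,
			       struct iov_iter *iter, bio_end_io_t *end_io)
	{
		do {
			struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);

			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = sector;
			bio->bi_opf = REQ_OP_READ;
			bio->bi_end_io = end_io;

			if (bio_iov_iter_get_pages(bio, iter)) {
				bio_put(bio);	/* nothing was mapped */
				break;
			}
			sector += bio_sectors(bio);
			submit_bio(bio);
		} while (iov_iter_count(iter));
	}
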
949 static void submit_bio_wait_endio(struct bio *bio) in submit_bio_wait_endio() argument
951 complete(bio->bi_private); in submit_bio_wait_endio()
965 int submit_bio_wait(struct bio *bio) in submit_bio_wait() argument
967 DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map); in submit_bio_wait()
969 bio->bi_private = &done; in submit_bio_wait()
970 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
971 bio->bi_opf |= REQ_SYNC; in submit_bio_wait()
972 submit_bio(bio); in submit_bio_wait()
975 return blk_status_to_errno(bio->bi_status); in submit_bio_wait()
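
submit_bio_wait() converts the asynchronous completion model into a blocking call with an on-stack completion; note that it does not consume the bio, so the caller still owns the final bio_put(). The smallest real-world shape is an empty flush, essentially what blkdev_issue_flush() does; flush_dev() is a hypothetical name.

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical: issue an empty preflush and wait for it. */
	static int flush_dev(struct block_device *bdev)
	{
		struct bio *bio = bio_alloc(GFP_KERNEL, 0);
		int ret;

		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
		ret = submit_bio_wait(bio);
		bio_put(bio);		/* submit_bio_wait() did not drop it */
		return ret;
	}
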
990 void bio_advance(struct bio *bio, unsigned bytes) in bio_advance() argument
992 if (bio_integrity(bio)) in bio_advance()
993 bio_integrity_advance(bio, bytes); in bio_advance()
995 bio_advance_iter(bio, &bio->bi_iter, bytes); in bio_advance()
999 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, in bio_copy_data_iter()
1000 struct bio *src, struct bvec_iter *src_iter) in bio_copy_data_iter()
1038 void bio_copy_data(struct bio *dst, struct bio *src) in bio_copy_data()
1057 void bio_list_copy_data(struct bio *dst, struct bio *src) in bio_list_copy_data()
1114 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) in bio_copy_from_iter() argument
1119 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_from_iter()
1145 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
1150 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_to_iter()
1168 void bio_free_pages(struct bio *bio) in bio_free_pages() argument
1173 bio_for_each_segment_all(bvec, bio, iter_all) in bio_free_pages()
1185 int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
1187 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
1190 if (!bio_flagged(bio, BIO_NULL_MAPPED)) { in bio_uncopy_user()
1198 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
1199 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
1201 bio_free_pages(bio); in bio_uncopy_user()
1204 bio_put(bio); in bio_uncopy_user()
1219 struct bio *bio_copy_user_iov(struct request_queue *q, in bio_copy_user_iov()
1226 struct bio *bio; in bio_copy_user_iov() local
1248 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_user_iov()
1249 if (!bio) in bio_copy_user_iov()
1284 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
1298 map_data->offset += bio->bi_iter.bi_size; in bio_copy_user_iov()
1305 ret = bio_copy_from_iter(bio, iter); in bio_copy_user_iov()
1310 zero_fill_bio(bio); in bio_copy_user_iov()
1311 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_copy_user_iov()
1314 bio->bi_private = bmd; in bio_copy_user_iov()
1316 bio_set_flag(bio, BIO_NULL_MAPPED); in bio_copy_user_iov()
1317 return bio; in bio_copy_user_iov()
1320 bio_free_pages(bio); in bio_copy_user_iov()
1321 bio_put(bio); in bio_copy_user_iov()
1336 struct bio *bio_map_user_iov(struct request_queue *q, in bio_map_user_iov()
1341 struct bio *bio; in bio_map_user_iov() local
1347 bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES)); in bio_map_user_iov()
1348 if (!bio) in bio_map_user_iov()
1377 if (!__bio_add_pc_page(q, bio, page, n, offs, in bio_map_user_iov()
1401 bio_set_flag(bio, BIO_USER_MAPPED); in bio_map_user_iov()
1409 bio_get(bio); in bio_map_user_iov()
1410 return bio; in bio_map_user_iov()
1413 bio_release_pages(bio, false); in bio_map_user_iov()
1414 bio_put(bio); in bio_map_user_iov()
1427 void bio_unmap_user(struct bio *bio) in bio_unmap_user() argument
1429 bio_release_pages(bio, bio_data_dir(bio) == READ); in bio_unmap_user()
1430 bio_put(bio); in bio_unmap_user()
1431 bio_put(bio); in bio_unmap_user()
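
The double bio_put() in bio_unmap_user() is intentional: bio_map_user_iov() pins the user pages for zero-copy I/O and takes an extra reference via the bio_get() above, so unmap drops both that extra reference and the caller's. These helpers normally sit under blk_rq_map_user_iov() rather than being called directly; the hedged lifecycle sketch below (pass_through() is hypothetical, request execution elided) only illustrates the pairing.

	#include <linux/bio.h>
	#include <linux/err.h>

	/* Hypothetical lifecycle sketch of the map/unmap pairing. */
	static int pass_through(struct request_queue *q, struct iov_iter *iter)
	{
		struct bio *bio = bio_map_user_iov(q, iter, GFP_KERNEL);

		if (IS_ERR(bio))
			return PTR_ERR(bio);

		/* ... attach the bio to a request and execute it (elided) ... */

		bio_unmap_user(bio);	/* dirties pages on READ, unpins,
					 * and drops both references */
		return 0;
	}
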
1434 static void bio_invalidate_vmalloc_pages(struct bio *bio) in bio_invalidate_vmalloc_pages() argument
1437 if (bio->bi_private && !op_is_write(bio_op(bio))) { in bio_invalidate_vmalloc_pages()
1440 for (i = 0; i < bio->bi_vcnt; i++) in bio_invalidate_vmalloc_pages()
1441 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
1442 invalidate_kernel_vmap_range(bio->bi_private, len); in bio_invalidate_vmalloc_pages()
1447 static void bio_map_kern_endio(struct bio *bio) in bio_map_kern_endio() argument
1449 bio_invalidate_vmalloc_pages(bio); in bio_map_kern_endio()
1450 bio_put(bio); in bio_map_kern_endio()
1463 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, in bio_map_kern()
1473 struct bio *bio; in bio_map_kern() local
1475 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_map_kern()
1476 if (!bio) in bio_map_kern()
1481 bio->bi_private = data; in bio_map_kern()
1498 if (bio_add_pc_page(q, bio, page, bytes, in bio_map_kern()
1501 bio_put(bio); in bio_map_kern()
1510 bio->bi_end_io = bio_map_kern_endio; in bio_map_kern()
1511 return bio; in bio_map_kern()
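
bio_map_kern() wraps an existing kernel buffer, including vmalloc memory, in a bio without copying; that is why its completion path must invalidate the vmap range before the bio is freed. In-tree callers go through blk_rq_map_kern(), which tracks completion through a request, so the stand-alone sketch below is fire-and-forget and heavily hedged: write_kernel_buf() is hypothetical, and @buf must outlive the I/O.

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/err.h>

	/* Hypothetical: map @buf in place and submit it asynchronously.
	 * bio_map_kern_endio() puts the bio on completion; do not install
	 * another bi_end_io (e.g. via submit_bio_wait()) or it is lost. */
	static int write_kernel_buf(struct request_queue *q,
				    struct block_device *bdev, sector_t sector,
				    void *buf, unsigned int len)
	{
		struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);

		if (IS_ERR(bio))
			return PTR_ERR(bio);

		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector;
		bio->bi_opf = REQ_OP_WRITE;
		submit_bio(bio);
		return 0;
	}
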
1514 static void bio_copy_kern_endio(struct bio *bio) in bio_copy_kern_endio() argument
1516 bio_free_pages(bio); in bio_copy_kern_endio()
1517 bio_put(bio); in bio_copy_kern_endio()
1520 static void bio_copy_kern_endio_read(struct bio *bio) in bio_copy_kern_endio_read() argument
1522 char *p = bio->bi_private; in bio_copy_kern_endio_read()
1526 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_kern_endio_read()
1531 bio_copy_kern_endio(bio); in bio_copy_kern_endio_read()
1545 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, in bio_copy_kern()
1551 struct bio *bio; in bio_copy_kern() local
1562 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_kern()
1563 if (!bio) in bio_copy_kern()
1580 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) in bio_copy_kern()
1588 bio->bi_end_io = bio_copy_kern_endio_read; in bio_copy_kern()
1589 bio->bi_private = data; in bio_copy_kern()
1591 bio->bi_end_io = bio_copy_kern_endio; in bio_copy_kern()
1594 return bio; in bio_copy_kern()
1597 bio_free_pages(bio); in bio_copy_kern()
1598 bio_put(bio); in bio_copy_kern()
1631 void bio_set_pages_dirty(struct bio *bio) in bio_set_pages_dirty() argument
1636 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_set_pages_dirty()
1657 static struct bio *bio_dirty_list;
1664 struct bio *bio, *next; in bio_dirty_fn() local
1671 while ((bio = next) != NULL) { in bio_dirty_fn()
1672 next = bio->bi_private; in bio_dirty_fn()
1674 bio_release_pages(bio, true); in bio_dirty_fn()
1675 bio_put(bio); in bio_dirty_fn()
1679 void bio_check_pages_dirty(struct bio *bio) in bio_check_pages_dirty() argument
1685 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_check_pages_dirty()
1690 bio_release_pages(bio, false); in bio_check_pages_dirty()
1691 bio_put(bio); in bio_check_pages_dirty()
1695 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
1696 bio_dirty_list = bio; in bio_check_pages_dirty()
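
The machinery above exists because a READ into user-mapped pages must leave those pages dirty, yet completion can run in interrupt context where pages cannot always be redirtied or released; such bios are parked on bio_dirty_list for the bio_dirty_fn workqueue. The hedged sketch below shows the consuming idiom, modeled on the fs/block_dev.c direct-I/O completion; dio_bio_end_io() is a hypothetical name.

	#include <linux/bio.h>

	/* Hypothetical direct-I/O completion: bio_set_pages_dirty() was
	 * called before submission on the READ path; here we re-dirty any
	 * pages that lost the bit. bio_check_pages_dirty() also consumes
	 * the bio, deferring to the workqueue when needed. */
	static void dio_bio_end_io(struct bio *bio)
	{
		if (bio_data_dir(bio) == READ) {
			bio_check_pages_dirty(bio);	/* puts the bio */
		} else {
			bio_release_pages(bio, false);
			bio_put(bio);
		}
	}
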
1751 static inline bool bio_remaining_done(struct bio *bio) in bio_remaining_done() argument
1757 if (!bio_flagged(bio, BIO_CHAIN)) in bio_remaining_done()
1760 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); in bio_remaining_done()
1762 if (atomic_dec_and_test(&bio->__bi_remaining)) { in bio_remaining_done()
1763 bio_clear_flag(bio, BIO_CHAIN); in bio_remaining_done()
1784 void bio_endio(struct bio *bio) in bio_endio() argument
1787 if (!bio_remaining_done(bio)) in bio_endio()
1789 if (!bio_integrity_endio(bio)) in bio_endio()
1792 if (bio->bi_disk) in bio_endio()
1793 rq_qos_done_bio(bio->bi_disk->queue, bio); in bio_endio()
1803 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1804 bio = __bio_chain_endio(bio); in bio_endio()
1808 if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) { in bio_endio()
1809 trace_block_bio_complete(bio->bi_disk->queue, bio, in bio_endio()
1810 blk_status_to_errno(bio->bi_status)); in bio_endio()
1811 bio_clear_flag(bio, BIO_TRACE_COMPLETION); in bio_endio()
1814 blk_throtl_bio_endio(bio); in bio_endio()
1816 bio_uninit(bio); in bio_endio()
1817 if (bio->bi_end_io) in bio_endio()
1818 bio->bi_end_io(bio); in bio_endio()
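
bio_endio() is the single completion entry point: it unwinds chained parents iteratively rather than recursively, so deeply stacked devices cannot overflow the stack, and only then invokes bi_end_io. From the device side the contract is small; below is a hedged sketch of a bio-mode make_request_fn that completes everything immediately, in the style of a null block device (toy_make_request() is a hypothetical name).

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical bio-mode driver: succeed every bio on the spot.
	 * bio_endio() handles chain accounting and calls bi_end_io. */
	static blk_qc_t toy_make_request(struct request_queue *q,
					 struct bio *bio)
	{
		bio->bi_status = BLK_STS_OK;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}
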
1836 struct bio *bio_split(struct bio *bio, int sectors, in bio_split() argument
1839 struct bio *split; in bio_split()
1842 BUG_ON(sectors >= bio_sectors(bio)); in bio_split()
1844 split = bio_clone_fast(bio, gfp, bs); in bio_split()
1853 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
1855 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) in bio_split()
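
bio_split() hands back a clone covering the first @sectors and advances the original past them; the two halves then complete independently unless rejoined with bio_chain(). The canonical consumer is a striping driver clamping a bio to a boundary, as in md/raid0; a hedged sketch where clamp_bio(), @max_sectors, and @bs are hypothetical:

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical: if @bio crosses a device boundary, split off the
	 * in-range front, chain the remainder back to it, and requeue the
	 * remainder for a later pass. */
	static struct bio *clamp_bio(struct bio *bio, unsigned int max_sectors,
				     struct bio_set *bs)
	{
		if (bio_sectors(bio) > max_sectors) {
			struct bio *split = bio_split(bio, max_sectors,
						      GFP_NOIO, bs);

			bio_chain(split, bio);	/* split completes into bio */
			generic_make_request(bio);
			bio = split;
		}
		return bio;	/* in-range piece for the caller to submit */
	}
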
1868 void bio_trim(struct bio *bio, int offset, int size) in bio_trim() argument
1875 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1878 bio_advance(bio, offset << 9); in bio_trim()
1879 bio->bi_iter.bi_size = size; in bio_trim()
1881 if (bio_integrity(bio)) in bio_trim()
1882 bio_integrity_trim(bio); in bio_trim()
2005 void bio_disassociate_blkg(struct bio *bio) in bio_disassociate_blkg() argument
2007 if (bio->bi_blkg) { in bio_disassociate_blkg()
2008 blkg_put(bio->bi_blkg); in bio_disassociate_blkg()
2009 bio->bi_blkg = NULL; in bio_disassociate_blkg()
2028 static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) in __bio_associate_blkg() argument
2030 bio_disassociate_blkg(bio); in __bio_associate_blkg()
2032 bio->bi_blkg = blkg_tryget_closest(blkg); in __bio_associate_blkg()
2044 void bio_associate_blkg_from_css(struct bio *bio, in bio_associate_blkg_from_css() argument
2047 struct request_queue *q = bio->bi_disk->queue; in bio_associate_blkg_from_css()
2057 __bio_associate_blkg(bio, blkg); in bio_associate_blkg_from_css()
2073 void bio_associate_blkg_from_page(struct bio *bio, struct page *page) in bio_associate_blkg_from_page() argument
2083 bio_associate_blkg_from_css(bio, css); in bio_associate_blkg_from_page()
2098 void bio_associate_blkg(struct bio *bio) in bio_associate_blkg() argument
2104 if (bio->bi_blkg) in bio_associate_blkg()
2105 css = &bio_blkcg(bio)->css; in bio_associate_blkg()
2109 bio_associate_blkg_from_css(bio, css); in bio_associate_blkg()
2120 void bio_clone_blkg_association(struct bio *dst, struct bio *src) in bio_clone_blkg_association()
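
The association helpers tie a bio to a blkcg_gq so throttling and iolatency can attribute the I/O: bio_associate_blkg() charges the submitting task's cgroup, bio_associate_blkg_from_page() charges the page owner (the writeback case), and bio_clone_blkg_association() carries the charge across clones. A minimal hedged sketch follows; note that in this kernel the bio_set_dev() macro already calls bio_associate_blkg(), so an explicit call mainly matters when re-associating, e.g. after a bio_reset() (charge_and_submit() is a hypothetical name).

	#include <linux/bio.h>

	/* Hypothetical: make sure the bio is billed to the current task's
	 * cgroup before it enters the block layer. */
	static void charge_and_submit(struct bio *bio)
	{
		bio_associate_blkg(bio);	/* uses current's css */
		submit_bio(bio);
	}
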