Lines Matching refs:bio
109 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
208 void bio_uninit(struct bio *bio) in bio_uninit() argument
211 if (bio->bi_blkg) { in bio_uninit()
212 blkg_put(bio->bi_blkg); in bio_uninit()
213 bio->bi_blkg = NULL; in bio_uninit()
216 if (bio_integrity(bio)) in bio_uninit()
217 bio_integrity_free(bio); in bio_uninit()
219 bio_crypt_free_ctx(bio); in bio_uninit()
223 static void bio_free(struct bio *bio) in bio_free() argument
225 struct bio_set *bs = bio->bi_pool; in bio_free()
228 bio_uninit(bio); in bio_free()
231 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
236 p = bio; in bio_free()
242 kfree(bio); in bio_free()
251 void bio_init(struct bio *bio, struct bio_vec *table, in bio_init() argument
254 bio->bi_next = NULL; in bio_init()
255 bio->bi_bdev = NULL; in bio_init()
256 bio->bi_opf = 0; in bio_init()
257 bio->bi_flags = 0; in bio_init()
258 bio->bi_ioprio = 0; in bio_init()
259 bio->bi_write_hint = 0; in bio_init()
260 bio->bi_status = 0; in bio_init()
261 bio->bi_iter.bi_sector = 0; in bio_init()
262 bio->bi_iter.bi_size = 0; in bio_init()
263 bio->bi_iter.bi_idx = 0; in bio_init()
264 bio->bi_iter.bi_bvec_done = 0; in bio_init()
265 bio->bi_end_io = NULL; in bio_init()
266 bio->bi_private = NULL; in bio_init()
268 bio->bi_blkg = NULL; in bio_init()
269 bio->bi_issue.value = 0; in bio_init()
271 bio->bi_iocost_cost = 0; in bio_init()
275 bio->bi_crypt_context = NULL; in bio_init()
278 bio->bi_integrity = NULL; in bio_init()
280 bio->bi_vcnt = 0; in bio_init()
282 atomic_set(&bio->__bi_remaining, 1); in bio_init()
283 atomic_set(&bio->__bi_cnt, 1); in bio_init()
285 bio->bi_max_vecs = max_vecs; in bio_init()
286 bio->bi_io_vec = table; in bio_init()
287 bio->bi_pool = NULL; in bio_init()
301 void bio_reset(struct bio *bio) in bio_reset() argument
303 bio_uninit(bio); in bio_reset()
304 memset(bio, 0, BIO_RESET_BYTES); in bio_reset()
305 atomic_set(&bio->__bi_remaining, 1); in bio_reset()
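Not part of the matched lines: a minimal reuse sketch, assuming the 5.15-era API listed above. bio_reset() zeroes the bio only up to BIO_RESET_BYTES, so the vector table (bi_io_vec/bi_max_vecs) and pool survive, but bi_bdev, bi_opf, bi_end_io and the iterator must be set again; bdev, next_sector, and my_end_io are illustrative names.

	bio_reset(bio);
	bio_set_dev(bio, bdev);			/* re-target the recycled bio */
	bio->bi_opf = REQ_OP_WRITE;
	bio->bi_iter.bi_sector = next_sector;
	bio->bi_end_io = my_end_io;		/* cleared by the memset above */
	submit_bio(bio);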
309 static struct bio *__bio_chain_endio(struct bio *bio) in __bio_chain_endio() argument
311 struct bio *parent = bio->bi_private; in __bio_chain_endio()
313 if (bio->bi_status && !parent->bi_status) in __bio_chain_endio()
314 parent->bi_status = bio->bi_status; in __bio_chain_endio()
315 bio_put(bio); in __bio_chain_endio()
319 static void bio_chain_endio(struct bio *bio) in bio_chain_endio() argument
321 bio_endio(__bio_chain_endio(bio)); in bio_chain_endio()
335 void bio_chain(struct bio *bio, struct bio *parent) in bio_chain() argument
337 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
339 bio->bi_private = parent; in bio_chain()
340 bio->bi_end_io = bio_chain_endio; in bio_chain()
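A usage sketch (not among the matched lines) of the split-and-chain pattern these helpers serve, assuming the signatures shown in this listing; max_sectors and bs are illustrative:

	struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

	bio_chain(split, bio);		/* parent bio now waits on split too */
	submit_bio_noacct(split);
	submit_bio_noacct(bio);		/* the advanced remainder */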
348 struct bio *bio; in bio_alloc_rescue() local
352 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
355 if (!bio) in bio_alloc_rescue()
358 submit_bio_noacct(bio); in bio_alloc_rescue()
365 struct bio *bio; in punt_bios_to_rescuer() local
383 while ((bio = bio_list_pop(&current->bio_list[0]))) in punt_bios_to_rescuer()
384 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
388 while ((bio = bio_list_pop(&current->bio_list[1]))) in punt_bios_to_rescuer()
389 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
431 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs, in bio_alloc_bioset()
435 struct bio *bio; in bio_alloc_bioset() local
475 bio = p + bs->front_pad; in bio_alloc_bioset()
488 bio_init(bio, bvl, nr_iovecs); in bio_alloc_bioset()
490 bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS); in bio_alloc_bioset()
492 bio_init(bio, NULL, 0); in bio_alloc_bioset()
495 bio->bi_pool = bs; in bio_alloc_bioset()
496 return bio; in bio_alloc_bioset()
513 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs) in bio_kmalloc()
515 struct bio *bio; in bio_kmalloc() local
520 bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask); in bio_kmalloc()
521 if (unlikely(!bio)) in bio_kmalloc()
523 bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs); in bio_kmalloc()
524 bio->bi_pool = NULL; in bio_kmalloc()
525 return bio; in bio_kmalloc()
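A sketch contrasting the two allocators above (assuming the 5.15-era signatures; my_bioset is an illustrative bio_set): bio_alloc_bioset() draws on the set's mempool and will not fail with a gfp_mask that allows waiting, while bio_kmalloc() has no pool backing and may return NULL. Both are released with bio_put().

	struct bio *a = bio_alloc_bioset(GFP_NOIO, 4, &my_bioset);

	struct bio *b = bio_kmalloc(GFP_KERNEL, 4);
	if (!b)
		return -ENOMEM;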
529 void zero_fill_bio(struct bio *bio) in zero_fill_bio() argument
534 bio_for_each_segment(bv, bio, iter) in zero_fill_bio()
549 void bio_truncate(struct bio *bio, unsigned new_size) in bio_truncate() argument
556 if (new_size >= bio->bi_iter.bi_size) in bio_truncate()
559 if (bio_op(bio) != REQ_OP_READ) in bio_truncate()
562 bio_for_each_segment(bv, bio, iter) { in bio_truncate()
585 bio->bi_iter.bi_size = new_size; in bio_truncate()
600 void guard_bio_eod(struct bio *bio) in guard_bio_eod() argument
602 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); in guard_bio_eod()
612 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
615 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod()
616 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) in guard_bio_eod()
619 bio_truncate(bio, maxsector << 9); in guard_bio_eod()
629 struct bio *bio; in bio_alloc_cache_prune() local
631 while ((bio = bio_list_pop(&cache->free_list)) != NULL) { in bio_alloc_cache_prune()
633 bio_free(bio); in bio_alloc_cache_prune()
677 void bio_put(struct bio *bio) in bio_put() argument
679 if (unlikely(bio_flagged(bio, BIO_REFFED))) { in bio_put()
680 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt)); in bio_put()
681 if (!atomic_dec_and_test(&bio->__bi_cnt)) in bio_put()
685 if (bio_flagged(bio, BIO_PERCPU_CACHE)) { in bio_put()
688 bio_uninit(bio); in bio_put()
689 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); in bio_put()
690 bio_list_add_head(&cache->free_list, bio); in bio_put()
695 bio_free(bio); in bio_put()
711 void __bio_clone_fast(struct bio *bio, struct bio *bio_src) in __bio_clone_fast() argument
713 WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs); in __bio_clone_fast()
719 bio->bi_bdev = bio_src->bi_bdev; in __bio_clone_fast()
720 bio_set_flag(bio, BIO_CLONED); in __bio_clone_fast()
722 bio_set_flag(bio, BIO_THROTTLED); in __bio_clone_fast()
724 bio_set_flag(bio, BIO_REMAPPED); in __bio_clone_fast()
725 bio->bi_opf = bio_src->bi_opf; in __bio_clone_fast()
726 bio->bi_ioprio = bio_src->bi_ioprio; in __bio_clone_fast()
727 bio->bi_write_hint = bio_src->bi_write_hint; in __bio_clone_fast()
728 bio->bi_iter = bio_src->bi_iter; in __bio_clone_fast()
729 bio->bi_io_vec = bio_src->bi_io_vec; in __bio_clone_fast()
731 bio_clone_blkg_association(bio, bio_src); in __bio_clone_fast()
732 blkcg_bio_issue_init(bio); in __bio_clone_fast()
744 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) in bio_clone_fast() argument
746 struct bio *b; in bio_clone_fast()
752 __bio_clone_fast(b, bio); in bio_clone_fast()
754 if (bio_crypt_clone(b, bio, gfp_mask) < 0) in bio_clone_fast()
757 if (bio_integrity(bio) && in bio_clone_fast()
758 bio_integrity_clone(b, bio, gfp_mask) < 0) in bio_clone_fast()
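A stacking-driver sketch (not from the listing) of bio_clone_fast(): the clone shares bio_src's bvec table and is flagged BIO_CLONED, so pages must not be added to it. drv, lower_bdev, and drv_clone_endio are hypothetical names.

	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &drv->bs);

	if (!clone)
		return -ENOMEM;
	bio_set_dev(clone, drv->lower_bdev);
	clone->bi_private = bio;		/* complete the original from */
	clone->bi_end_io = drv_clone_endio;	/* the clone's end_io */
	submit_bio_noacct(clone);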
769 const char *bio_devname(struct bio *bio, char *buf) in bio_devname() argument
771 return bdevname(bio->bi_bdev, buf); in bio_devname()
799 static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio, in bio_try_merge_hw_seg() argument
803 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_try_merge_hw_seg()
812 return __bio_try_merge_page(bio, page, len, offset, same_page); in bio_try_merge_hw_seg()
828 int bio_add_hw_page(struct request_queue *q, struct bio *bio, in bio_add_hw_page() argument
834 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_add_hw_page()
837 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) in bio_add_hw_page()
840 if (bio->bi_vcnt > 0) { in bio_add_hw_page()
841 if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page)) in bio_add_hw_page()
848 bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_add_hw_page()
853 if (bio_full(bio, len)) in bio_add_hw_page()
856 if (bio->bi_vcnt >= queue_max_segments(q)) in bio_add_hw_page()
859 bvec = &bio->bi_io_vec[bio->bi_vcnt]; in bio_add_hw_page()
863 bio->bi_vcnt++; in bio_add_hw_page()
864 bio->bi_iter.bi_size += len; in bio_add_hw_page()
883 int bio_add_pc_page(struct request_queue *q, struct bio *bio, in bio_add_pc_page() argument
887 return bio_add_hw_page(q, bio, page, len, offset, in bio_add_pc_page()
908 int bio_add_zone_append_page(struct bio *bio, struct page *page, in bio_add_zone_append_page() argument
911 struct request_queue *q = bio->bi_bdev->bd_disk->queue; in bio_add_zone_append_page()
914 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) in bio_add_zone_append_page()
920 return bio_add_hw_page(q, bio, page, len, offset, in bio_add_zone_append_page()
941 bool __bio_try_merge_page(struct bio *bio, struct page *page, in __bio_try_merge_page() argument
944 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in __bio_try_merge_page()
947 if (bio->bi_vcnt > 0) { in __bio_try_merge_page()
948 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in __bio_try_merge_page()
951 if (bio->bi_iter.bi_size > UINT_MAX - len) { in __bio_try_merge_page()
956 bio->bi_iter.bi_size += len; in __bio_try_merge_page()
974 void __bio_add_page(struct bio *bio, struct page *page, in __bio_add_page() argument
977 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt]; in __bio_add_page()
979 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); in __bio_add_page()
980 WARN_ON_ONCE(bio_full(bio, len)); in __bio_add_page()
986 bio->bi_iter.bi_size += len; in __bio_add_page()
987 bio->bi_vcnt++; in __bio_add_page()
989 if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page))) in __bio_add_page()
990 bio_set_flag(bio, BIO_WORKINGSET); in __bio_add_page()
1004 int bio_add_page(struct bio *bio, struct page *page, in bio_add_page() argument
1009 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) { in bio_add_page()
1010 if (bio_full(bio, len)) in bio_add_page()
1012 __bio_add_page(bio, page, len, offset); in bio_add_page()
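A fill-loop sketch assuming the signature above: bio_add_page() returns the number of bytes actually added, so anything short of len means the bio is full (pages and nr_pages are illustrative).

	for (i = 0; i < nr_pages; i++) {
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;		/* full: submit this bio, start a new one */
	}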
1018 void bio_release_pages(struct bio *bio, bool mark_dirty) in bio_release_pages() argument
1023 if (bio_flagged(bio, BIO_NO_PAGE_REF)) in bio_release_pages()
1026 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_release_pages()
1034 static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) in __bio_iov_bvec_set() argument
1036 WARN_ON_ONCE(bio->bi_max_vecs); in __bio_iov_bvec_set()
1038 bio->bi_vcnt = iter->nr_segs; in __bio_iov_bvec_set()
1039 bio->bi_io_vec = (struct bio_vec *)iter->bvec; in __bio_iov_bvec_set()
1040 bio->bi_iter.bi_bvec_done = iter->iov_offset; in __bio_iov_bvec_set()
1041 bio->bi_iter.bi_size = iter->count; in __bio_iov_bvec_set()
1042 bio_set_flag(bio, BIO_NO_PAGE_REF); in __bio_iov_bvec_set()
1043 bio_set_flag(bio, BIO_CLONED); in __bio_iov_bvec_set()
1046 static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) in bio_iov_bvec_set() argument
1048 __bio_iov_bvec_set(bio, iter); in bio_iov_bvec_set()
1053 static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter) in bio_iov_bvec_set_append() argument
1055 struct request_queue *q = bio->bi_bdev->bd_disk->queue; in bio_iov_bvec_set_append()
1059 __bio_iov_bvec_set(bio, &i); in bio_iov_bvec_set_append()
1084 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_iter_get_pages() argument
1086 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1087 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1088 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_iter_get_pages()
1112 if (__bio_try_merge_page(bio, page, len, offset, &same_page)) { in __bio_iov_iter_get_pages()
1116 if (WARN_ON_ONCE(bio_full(bio, len))) { in __bio_iov_iter_get_pages()
1120 __bio_add_page(bio, page, len, offset); in __bio_iov_iter_get_pages()
1129 static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) in __bio_iov_append_get_pages() argument
1131 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_append_get_pages()
1132 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_append_get_pages()
1133 struct request_queue *q = bio->bi_bdev->bd_disk->queue; in __bio_iov_append_get_pages()
1135 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_append_get_pages()
1162 if (bio_add_hw_page(q, bio, page, len, offset, in __bio_iov_append_get_pages()
1200 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) in bio_iov_iter_get_pages() argument
1205 if (bio_op(bio) == REQ_OP_ZONE_APPEND) in bio_iov_iter_get_pages()
1206 return bio_iov_bvec_set_append(bio, iter); in bio_iov_iter_get_pages()
1207 return bio_iov_bvec_set(bio, iter); in bio_iov_iter_get_pages()
1211 if (bio_op(bio) == REQ_OP_ZONE_APPEND) in bio_iov_iter_get_pages()
1212 ret = __bio_iov_append_get_pages(bio, iter); in bio_iov_iter_get_pages()
1214 ret = __bio_iov_iter_get_pages(bio, iter); in bio_iov_iter_get_pages()
1215 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); in bio_iov_iter_get_pages()
1218 bio_clear_flag(bio, BIO_WORKINGSET); in bio_iov_iter_get_pages()
1219 return bio->bi_vcnt ? 0 : ret; in bio_iov_iter_get_pages()
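A direct-I/O style loop, sketched with illustrative names (bdev, pos, dio_end_io) under the assumed 5.15-era API. Per the tail of bio_iov_iter_get_pages() above, BIO_WORKINGSET is cleared and success is reported if any page was mapped.

	do {
		struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_VECS);

		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_READ;
		bio->bi_iter.bi_sector = pos >> 9;
		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret) {
			bio_put(bio);
			break;
		}
		pos += bio->bi_iter.bi_size;
		bio->bi_end_io = dio_end_io;
		submit_bio(bio);
	} while (iov_iter_count(iter));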
1223 static void submit_bio_wait_endio(struct bio *bio) in submit_bio_wait_endio() argument
1225 complete(bio->bi_private); in submit_bio_wait_endio()
1239 int submit_bio_wait(struct bio *bio) in submit_bio_wait() argument
1242 bio->bi_bdev->bd_disk->lockdep_map); in submit_bio_wait()
1245 bio->bi_private = &done; in submit_bio_wait()
1246 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
1247 bio->bi_opf |= REQ_SYNC; in submit_bio_wait()
1248 submit_bio(bio); in submit_bio_wait()
1259 return blk_status_to_errno(bio->bi_status); in submit_bio_wait()
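A synchronous one-page read as a sketch (bdev, sector, and page are illustrative); submit_bio_wait() completes on an on-stack completion via the endio above and maps bi_status to a negative errno, 0 on success.

	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);	/* fresh bio, room for 1 */
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;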
1274 void bio_advance(struct bio *bio, unsigned bytes) in bio_advance() argument
1276 if (bio_integrity(bio)) in bio_advance()
1277 bio_integrity_advance(bio, bytes); in bio_advance()
1279 bio_crypt_advance(bio, bytes); in bio_advance()
1280 bio_advance_iter(bio, &bio->bi_iter, bytes); in bio_advance()
1284 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, in bio_copy_data_iter()
1285 struct bio *src, struct bvec_iter *src_iter) in bio_copy_data_iter()
1311 void bio_copy_data(struct bio *dst, struct bio *src) in bio_copy_data()
1320 void bio_free_pages(struct bio *bio) in bio_free_pages() argument
1325 bio_for_each_segment_all(bvec, bio, iter_all) in bio_free_pages()
1359 void bio_set_pages_dirty(struct bio *bio) in bio_set_pages_dirty() argument
1364 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_set_pages_dirty()
1385 static struct bio *bio_dirty_list;
1392 struct bio *bio, *next; in bio_dirty_fn() local
1399 while ((bio = next) != NULL) { in bio_dirty_fn()
1400 next = bio->bi_private; in bio_dirty_fn()
1402 bio_release_pages(bio, true); in bio_dirty_fn()
1403 bio_put(bio); in bio_dirty_fn()
1407 void bio_check_pages_dirty(struct bio *bio) in bio_check_pages_dirty() argument
1413 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_check_pages_dirty()
1418 bio_release_pages(bio, false); in bio_check_pages_dirty()
1419 bio_put(bio); in bio_check_pages_dirty()
1423 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
1424 bio_dirty_list = bio; in bio_check_pages_dirty()
1429 static inline bool bio_remaining_done(struct bio *bio) in bio_remaining_done() argument
1435 if (!bio_flagged(bio, BIO_CHAIN)) in bio_remaining_done()
1438 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); in bio_remaining_done()
1440 if (atomic_dec_and_test(&bio->__bi_remaining)) { in bio_remaining_done()
1441 bio_clear_flag(bio, BIO_CHAIN); in bio_remaining_done()
1461 void bio_endio(struct bio *bio) in bio_endio() argument
1464 if (!bio_remaining_done(bio)) in bio_endio()
1466 if (!bio_integrity_endio(bio)) in bio_endio()
1469 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED)) in bio_endio()
1470 rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio); in bio_endio()
1472 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { in bio_endio()
1473 trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio); in bio_endio()
1474 bio_clear_flag(bio, BIO_TRACE_COMPLETION); in bio_endio()
1485 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1486 bio = __bio_chain_endio(bio); in bio_endio()
1490 blk_throtl_bio_endio(bio); in bio_endio()
1492 bio_uninit(bio); in bio_endio()
1493 if (bio->bi_end_io) in bio_endio()
1494 bio->bi_end_io(bio); in bio_endio()
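For contrast with bio_chain_endio() above, a typical driver-side completion handler, sketched with a hypothetical context (my_ctx): by the time bi_end_io runs, bio_endio() has already handled chaining, throttling, and trace completion.

	static void my_end_io(struct bio *bio)
	{
		struct my_ctx *ctx = bio->bi_private;

		if (bio->bi_status)
			ctx->error = blk_status_to_errno(bio->bi_status);
		complete(&ctx->done);
		bio_put(bio);
	}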
1512 struct bio *bio_split(struct bio *bio, int sectors, in bio_split() argument
1515 struct bio *split; in bio_split()
1518 BUG_ON(sectors >= bio_sectors(bio)); in bio_split()
1521 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) in bio_split()
1524 split = bio_clone_fast(bio, gfp, bs); in bio_split()
1533 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
1535 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) in bio_split()
1551 void bio_trim(struct bio *bio, sector_t offset, sector_t size) in bio_trim() argument
1554 offset + size > bio->bi_iter.bi_size)) in bio_trim()
1558 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1561 bio_advance(bio, offset << 9); in bio_trim()
1562 bio->bi_iter.bi_size = size; in bio_trim()
1564 if (bio_integrity(bio)) in bio_trim()
1565 bio_integrity_trim(bio); in bio_trim()
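A trimming sketch under the assumption (per the sector_t signature above) that offset and size are both in 512-byte sectors; clone-then-trim is the usual pattern, since bio_trim() modifies the bio in place. skip_sectors and want_sectors are illustrative.

	struct bio *part = bio_clone_fast(bio, GFP_NOIO, bs);

	bio_trim(part, skip_sectors, want_sectors);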
1702 struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, in bio_alloc_kiocb()
1706 struct bio *bio; in bio_alloc_kiocb() local
1712 bio = bio_list_pop(&cache->free_list); in bio_alloc_kiocb()
1713 if (bio) { in bio_alloc_kiocb()
1716 bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs); in bio_alloc_kiocb()
1717 bio->bi_pool = bs; in bio_alloc_kiocb()
1718 bio_set_flag(bio, BIO_PERCPU_CACHE); in bio_alloc_kiocb()
1719 return bio; in bio_alloc_kiocb()
1722 bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); in bio_alloc_kiocb()
1723 bio_set_flag(bio, BIO_PERCPU_CACHE); in bio_alloc_kiocb()
1724 return bio; in bio_alloc_kiocb()