| /Linux-v5.15/include/linux/ |
| D | bio.h |
      29  #define bio_prio(bio) (bio)->bi_ioprio  (argument)
      30  #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)  (argument)
      32  #define bio_iter_iovec(bio, iter) \  (argument)
      33  bvec_iter_bvec((bio)->bi_io_vec, (iter))
      35  #define bio_iter_page(bio, iter) \  (argument)
      36  bvec_iter_page((bio)->bi_io_vec, (iter))
      37  #define bio_iter_len(bio, iter) \  (argument)
      38  bvec_iter_len((bio)->bi_io_vec, (iter))
      39  #define bio_iter_offset(bio, iter) \  (argument)
      40  bvec_iter_offset((bio)->bi_io_vec, (iter))
      [all …]
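These accessors are thin wrappers around the generic bvec iterator helpers. As a minimal sketch (not taken from bio.h; the example_ name is illustrative), this is how a caller typically walks a bio's data segments with bio_for_each_segment(), which expands to bio_iter_iovec() applied to a private copy of bio->bi_iter:

    #include <linux/bio.h>

    /* Count the payload bytes a bio describes by visiting each segment. */
    static unsigned int example_bio_payload_bytes(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;
            unsigned int bytes = 0;

            bio_for_each_segment(bv, bio, iter) {
                    /* bv.bv_page, bv.bv_offset and bv.bv_len describe one segment */
                    bytes += bv.bv_len;
            }
            return bytes;
    }

Because the macro advances its own struct bvec_iter copy, bio->bi_iter itself is left untouched for the eventual submission or completion path.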
|
| /Linux-v5.15/block/ |
| D | bio.c |
      109  return bs->front_pad + sizeof(struct bio) + bs->back_pad;  (in bs_bio_slab_size())
      208  void bio_uninit(struct bio *bio)  (in bio_uninit(), argument)
      211  if (bio->bi_blkg) {  (in bio_uninit())
      212  blkg_put(bio->bi_blkg);  (in bio_uninit())
      213  bio->bi_blkg = NULL;  (in bio_uninit())
      216  if (bio_integrity(bio))  (in bio_uninit())
      217  bio_integrity_free(bio);  (in bio_uninit())
      219  bio_crypt_free_ctx(bio);  (in bio_uninit())
      223  static void bio_free(struct bio *bio)  (in bio_free(), argument)
      225  struct bio_set *bs = bio->bi_pool;  (in bio_free())
      [all …]
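The matches above are the teardown half: bio_uninit() releases per-bio state (the blkcg reference, integrity payload and crypt context) and bio_free() hands the memory back to the bio_set it came from. From a caller's perspective that machinery is reached through the final bio_put(). A minimal sketch of the ordinary lifecycle, assuming a hypothetical helper that reads one page synchronously (example_ names are illustrative):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/mm.h>

    static int example_read_page(struct block_device *bdev, struct page *page,
                                 sector_t sector)
    {
            struct bio *bio = bio_alloc(GFP_NOIO, 1);   /* one bvec, from fs_bio_set */
            int err;

            bio_set_dev(bio, bdev);
            bio->bi_iter.bi_sector = sector;
            bio->bi_opf = REQ_OP_READ;
            bio_add_page(bio, page, PAGE_SIZE, 0);

            err = submit_bio_wait(bio);   /* synchronous: waits on an internal completion */
            bio_put(bio);                 /* final reference: bio_uninit() + bio_free() run here */
            return err;
    }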
|
| D | blk-merge.c |
      17  struct request *prev_rq, struct bio *prev, struct bio *next)  (in bio_will_gap())
      30  bio_get_first_bvec(prev_rq->bio, &pb);  (in bio_will_gap())
      52  static inline bool req_gap_back_merge(struct request *req, struct bio *bio)  (in req_gap_back_merge(), argument)
      54  return bio_will_gap(req->q, req, req->biotail, bio);  (in req_gap_back_merge())
      57  static inline bool req_gap_front_merge(struct request *req, struct bio *bio)  (in req_gap_front_merge(), argument)
      59  return bio_will_gap(req->q, NULL, bio, req->bio);  (in req_gap_front_merge())
      62  static struct bio *blk_bio_discard_split(struct request_queue *q,  (in blk_bio_discard_split())
      63  struct bio *bio,  (in blk_bio_discard_split(), argument)
      86  if (bio_sectors(bio) <= max_discard_sectors)  (in blk_bio_discard_split())
      97  tmp = bio->bi_iter.bi_sector + split_sectors - alignment;  (in blk_bio_discard_split())
      [all …]
|
| D | blk-map.c |
      46  static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)  (in bio_copy_from_iter(), argument)
      51  bio_for_each_segment_all(bvec, bio, iter_all) {  (in bio_copy_from_iter())
      77  static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)  (in bio_copy_to_iter(), argument)
      82  bio_for_each_segment_all(bvec, bio, iter_all) {  (in bio_copy_to_iter())
      107  static int bio_uncopy_user(struct bio *bio)  (in bio_uncopy_user(), argument)
      109  struct bio_map_data *bmd = bio->bi_private;  (in bio_uncopy_user())
      120  else if (bio_data_dir(bio) == READ)  (in bio_uncopy_user())
      121  ret = bio_copy_to_iter(bio, bmd->iter);  (in bio_uncopy_user())
      123  bio_free_pages(bio);  (in bio_uncopy_user())
      134  struct bio *bio;  (in bio_copy_user_iov(), local)
      [all …]
|
| D | bounce.c |
      75  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)  (in copy_to_high_bio_irq())
      101  static void bounce_end_io(struct bio *bio)  (in bounce_end_io(), argument)
      103  struct bio *bio_orig = bio->bi_private;  (in bounce_end_io())
      111  bio_for_each_segment_all(bvec, bio, iter_all) {  (in bounce_end_io())
      120  bio_orig->bi_status = bio->bi_status;  (in bounce_end_io())
      122  bio_put(bio);  (in bounce_end_io())
      125  static void bounce_end_io_write(struct bio *bio)  (in bounce_end_io_write(), argument)
      127  bounce_end_io(bio);  (in bounce_end_io_write())
      130  static void bounce_end_io_read(struct bio *bio)  (in bounce_end_io_read(), argument)
      132  struct bio *bio_orig = bio->bi_private;  (in bounce_end_io_read())
      [all …]
|
| D | blk-lib.c |
      13  struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)  (in blk_next_bio(), argument)
      15  struct bio *new = bio_alloc(gfp, nr_pages);  (in blk_next_bio())
      17  if (bio) {  (in blk_next_bio())
      18  bio_chain(bio, new);  (in blk_next_bio())
      19  submit_bio(bio);  (in blk_next_bio())
      28  struct bio **biop)  (in __blkdev_issue_discard())
      31  struct bio *bio = *biop;  (in __blkdev_issue_discard(), local)
      98  bio = blk_next_bio(bio, 0, gfp_mask);  (in __blkdev_issue_discard())
      99  bio->bi_iter.bi_sector = sector;  (in __blkdev_issue_discard())
      100  bio_set_dev(bio, bdev);  (in __blkdev_issue_discard())
      [all …]
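blk_next_bio() captures the chaining idiom used throughout blk-lib.c: when one bio cannot describe the whole range, chain it to a freshly allocated successor and submit it, and the chain as a whole completes only once the last bio does. A sketch of the same helper written outside the block layer (blk_next_bio() itself is private to block/; the example_ name is illustrative):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static struct bio *example_next_bio(struct bio *bio, unsigned int nr_pages,
                                        gfp_t gfp)
    {
            struct bio *new = bio_alloc(gfp, nr_pages);

            if (bio) {
                    bio_chain(bio, new);   /* 'new' will not complete until 'bio' has */
                    submit_bio(bio);
            }
            return new;
    }

__blkdev_issue_discard() then loops over the range, calling this once per maximum-sized chunk, filling in bi_iter.bi_sector, the device and the size for each new bio, and leaves the last bio in *biop for the caller to submit.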
|
| D | blk-crypto-internal.h |
      26  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
      32  struct bio *bio)  (in bio_crypt_ctx_back_mergeable(), argument)
      35  bio->bi_crypt_context);  (in bio_crypt_ctx_back_mergeable())
      39  struct bio *bio)  (in bio_crypt_ctx_front_mergeable(), argument)
      41  return bio_crypt_ctx_mergeable(bio->bi_crypt_context,  (in bio_crypt_ctx_front_mergeable())
      42  bio->bi_iter.bi_size, req->crypt_ctx);  (in bio_crypt_ctx_front_mergeable())
      66  struct bio *bio)  (in bio_crypt_rq_ctx_compatible(), argument)
      72  struct bio *bio)  (in bio_crypt_ctx_front_mergeable(), argument)
      78  struct bio *bio)  (in bio_crypt_ctx_back_mergeable(), argument)
      98  void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
      [all …]
|
| D | bio-integrity.c |
      48  struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,  (in bio_integrity_alloc(), argument)
      53  struct bio_set *bs = bio->bi_pool;  (in bio_integrity_alloc())
      56  if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))  (in bio_integrity_alloc())
      83  bip->bip_bio = bio;  (in bio_integrity_alloc())
      84  bio->bi_integrity = bip;  (in bio_integrity_alloc())
      85  bio->bi_opf |= REQ_INTEGRITY;  (in bio_integrity_alloc())
      101  void bio_integrity_free(struct bio *bio)  (in bio_integrity_free(), argument)
      103  struct bio_integrity_payload *bip = bio_integrity(bio);  (in bio_integrity_free())
      104  struct bio_set *bs = bio->bi_pool;  (in bio_integrity_free())
      110  bio->bi_integrity = NULL;  (in bio_integrity_free())
      [all …]
|
| D | blk-core.c |
      236  static void req_bio_endio(struct request *rq, struct bio *bio,  (in req_bio_endio(), argument)
      240  bio->bi_status = error;  (in req_bio_endio())
      243  bio_set_flag(bio, BIO_QUIET);  (in req_bio_endio())
      245  bio_advance(bio, nbytes);  (in req_bio_endio())
      252  if (bio->bi_iter.bi_size)  (in req_bio_endio())
      253  bio->bi_status = BLK_STS_IOERR;  (in req_bio_endio())
      255  bio->bi_iter.bi_sector = rq->__sector;  (in req_bio_endio())
      259  if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))  (in req_bio_endio())
      260  bio_endio(bio);  (in req_bio_endio())
      273  rq->bio, rq->biotail, blk_rq_bytes(rq));  (in blk_dump_rq_flags())
      [all …]
|
| D | blk.h |
      99  bool __bio_integrity_endio(struct bio *);
      100  void bio_integrity_free(struct bio *bio);
      101  static inline bool bio_integrity_endio(struct bio *bio)  (in bio_integrity_endio(), argument)
      103  if (bio_integrity(bio))  (in bio_integrity_endio())
      104  return __bio_integrity_endio(bio);  (in bio_integrity_endio())
      111  struct bio *);
      114  struct bio *next)  (in integrity_req_gap_back_merge())
      116  struct bio_integrity_payload *bip = bio_integrity(req->bio);  (in integrity_req_gap_back_merge())
      124  struct bio *bio)  (in integrity_req_gap_front_merge(), argument)
      126  struct bio_integrity_payload *bip = bio_integrity(bio);  (in integrity_req_gap_front_merge())
      [all …]
|
| D | blk-crypto-fallback.c |
      51  struct bio *bio;  (member)
      147  static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)  (in blk_crypto_fallback_encrypt_endio())
      149  struct bio *src_bio = enc_bio->bi_private;  (in blk_crypto_fallback_encrypt_endio())
      162  static struct bio *blk_crypto_clone_bio(struct bio *bio_src)  (in blk_crypto_clone_bio())
      166  struct bio *bio;  (in blk_crypto_clone_bio(), local)
      168  bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));  (in blk_crypto_clone_bio())
      169  if (!bio)  (in blk_crypto_clone_bio())
      171  bio->bi_bdev = bio_src->bi_bdev;  (in blk_crypto_clone_bio())
      173  bio_set_flag(bio, BIO_REMAPPED);  (in blk_crypto_clone_bio())
      174  bio->bi_opf = bio_src->bi_opf;  (in blk_crypto_clone_bio())
      [all …]
|
| D | fops.c |
      46  static void blkdev_bio_end_io_simple(struct bio *bio)  (in blkdev_bio_end_io_simple(), argument)
      48  struct task_struct *waiter = bio->bi_private;  (in blkdev_bio_end_io_simple())
      50  WRITE_ONCE(bio->bi_private, NULL);  (in blkdev_bio_end_io_simple())
      62  struct bio bio;  (in __blkdev_direct_IO_simple(), local)
      79  bio_init(&bio, vecs, nr_pages);  (in __blkdev_direct_IO_simple())
      80  bio_set_dev(&bio, bdev);  (in __blkdev_direct_IO_simple())
      81  bio.bi_iter.bi_sector = pos >> 9;  (in __blkdev_direct_IO_simple())
      82  bio.bi_write_hint = iocb->ki_hint;  (in __blkdev_direct_IO_simple())
      83  bio.bi_private = current;  (in __blkdev_direct_IO_simple())
      84  bio.bi_end_io = blkdev_bio_end_io_simple;  (in __blkdev_direct_IO_simple())
      [all …]
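__blkdev_direct_IO_simple() builds the bio and its bvec array on the caller's stack, stores the submitting task in bi_private and has blkdev_bio_end_io_simple() wake it. A sketch of the same on-stack idiom, assuming a completion instead of the hand-rolled task wakeup (example_ names are illustrative):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/completion.h>
    #include <linux/mm.h>

    static void example_stack_bio_end_io(struct bio *bio)
    {
            complete(bio->bi_private);
    }

    static int example_read_sync(struct block_device *bdev, struct page *page,
                                 loff_t pos)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct bio_vec vec;
            struct bio bio;

            bio_init(&bio, &vec, 1);           /* on-stack bio with one inline bvec */
            bio_set_dev(&bio, bdev);
            bio.bi_iter.bi_sector = pos >> 9;  /* byte offset to 512-byte sectors */
            bio.bi_opf = REQ_OP_READ;
            bio.bi_private = &done;
            bio.bi_end_io = example_stack_bio_end_io;
            bio_add_page(&bio, page, PAGE_SIZE, 0);

            submit_bio(&bio);
            wait_for_completion(&done);
            return blk_status_to_errno(bio.bi_status);
    }

Because the bio was bio_init()ed on the stack rather than allocated from a bio_set, no bio_put() is needed; the storage disappears with the stack frame once the wait returns.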
|
| D | blk-rq-qos.h |
      39  void (*throttle)(struct rq_qos *, struct bio *);
      40  void (*track)(struct rq_qos *, struct request *, struct bio *);
      41  void (*merge)(struct rq_qos *, struct request *, struct bio *);
      45  void (*done_bio)(struct rq_qos *, struct bio *);
      46  void (*cleanup)(struct rq_qos *, struct bio *);
      146  void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
      150  void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
      151  void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
      152  void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
      153  void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
      [all …]
|
| /Linux-v5.15/drivers/md/bcache/ |
| D | request.c |
      40  static void bio_csum(struct bio *bio, struct bkey *k)  (in bio_csum(), argument)
      46  bio_for_each_segment(bv, bio, iter) {  (in bio_csum())
      111  struct bio *bio = op->bio;  (in bch_data_invalidate(), local)
      114  bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  (in bch_data_invalidate())
      116  while (bio_sectors(bio)) {  (in bch_data_invalidate())
      117  unsigned int sectors = min(bio_sectors(bio),  (in bch_data_invalidate())
      123  bio->bi_iter.bi_sector += sectors;  (in bch_data_invalidate())
      124  bio->bi_iter.bi_size -= sectors << 9;  (in bch_data_invalidate())
      128  bio->bi_iter.bi_sector,  (in bch_data_invalidate())
      134  bio_put(bio);  (in bch_data_invalidate())
      [all …]
|
| D | io.c |
      17  void bch_bbio_free(struct bio *bio, struct cache_set *c)  (in bch_bbio_free(), argument)
      19  struct bbio *b = container_of(bio, struct bbio, bio);  (in bch_bbio_free())
      24  struct bio *bch_bbio_alloc(struct cache_set *c)  (in bch_bbio_alloc())
      27  struct bio *bio = &b->bio;  (in bch_bbio_alloc(), local)
      29  bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));  (in bch_bbio_alloc())
      31  return bio;  (in bch_bbio_alloc())
      34  void __bch_submit_bbio(struct bio *bio, struct cache_set *c)  (in __bch_submit_bbio(), argument)
      36  struct bbio *b = container_of(bio, struct bbio, bio);  (in __bch_submit_bbio())
      38  bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);  (in __bch_submit_bbio())
      39  bio_set_dev(bio, c->cache->bdev);  (in __bch_submit_bbio())
      [all …]
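bcache wraps every cache-device bio in a struct bbio, placing the struct bio (and its inline bvecs) last in the wrapper and recovering the wrapper with container_of() when the bio comes back. A generic sketch of that embedding pattern under illustrative names (not bcache's own):

    #include <linux/bio.h>
    #include <linux/slab.h>

    struct example_wrapped_bio {
            sector_t        target_sector;  /* driver-private per-I/O state */
            struct bio      bio;            /* must be last: bi_inline_vecs follow */
    };

    static void example_wrapped_end_io(struct bio *bio)
    {
            struct example_wrapped_bio *w =
                    container_of(bio, struct example_wrapped_bio, bio);

            /* inspect bio->bi_status, act on w->target_sector, then release */
            kfree(w);
    }

    static struct bio *example_wrapped_alloc(unsigned int nr_vecs, gfp_t gfp)
    {
            struct example_wrapped_bio *w;

            w = kmalloc(sizeof(*w) + nr_vecs * sizeof(struct bio_vec), gfp);
            if (!w)
                    return NULL;

            bio_init(&w->bio, w->bio.bi_inline_vecs, nr_vecs);
            w->bio.bi_end_io = example_wrapped_end_io;
            return &w->bio;
    }

Keeping the bio as the final member matters because bi_inline_vecs is a flexible array: the extra bio_vecs land in the same allocation as the wrapper, which is why bch_bbio_alloc() can hand bio->bi_inline_vecs to bio_init().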
|
| D | movinggc.c |
      19  struct bbio bio;  (member)
      48  struct bio *bio = &io->bio.bio;  (in write_moving_finish(), local)
      50  bio_free_pages(bio);  (in write_moving_finish())
      62  static void read_moving_endio(struct bio *bio)  (in read_moving_endio(), argument)
      64  struct bbio *b = container_of(bio, struct bbio, bio);  (in read_moving_endio())
      65  struct moving_io *io = container_of(bio->bi_private,  (in read_moving_endio())
      68  if (bio->bi_status)  (in read_moving_endio())
      69  io->op.status = bio->bi_status;  (in read_moving_endio())
      75  bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");  (in read_moving_endio())
      80  struct bio *bio = &io->bio.bio;  (in moving_init(), local)
      [all …]
|
| /Linux-v5.15/fs/xfs/ |
| D | xfs_bio_io.c |
      14  struct bio *bio)  (in xfs_flush_bdev_async_endio(), argument)
      16  complete(bio->bi_private);  (in xfs_flush_bdev_async_endio())
      28  struct bio *bio,  (in xfs_flush_bdev_async(), argument)
      39  bio_init(bio, NULL, 0);  (in xfs_flush_bdev_async())
      40  bio_set_dev(bio, bdev);  (in xfs_flush_bdev_async())
      41  bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;  (in xfs_flush_bdev_async())
      42  bio->bi_private = done;  (in xfs_flush_bdev_async())
      43  bio->bi_end_io = xfs_flush_bdev_async_endio;  (in xfs_flush_bdev_async())
      45  submit_bio(bio);  (in xfs_flush_bdev_async())
      59  struct bio *bio;  (in xfs_rw_bdev(), local)
      [all …]
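xfs_flush_bdev_async() is a compact example of issuing a cache flush: an empty bio (no data pages) carrying REQ_PREFLUSH, with completion reported through bi_private. The sketch below mirrors the function shown above under illustrative names; a purely synchronous caller could instead call blkdev_issue_flush(bdev):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/completion.h>

    static void example_flush_end_io(struct bio *bio)
    {
            complete(bio->bi_private);
    }

    static void example_flush_bdev_async(struct bio *bio, struct block_device *bdev,
                                         struct completion *done)
    {
            bio_init(bio, NULL, 0);                 /* no data: zero bvecs */
            bio_set_dev(bio, bdev);
            bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
            bio->bi_private = done;
            bio->bi_end_io = example_flush_end_io;

            submit_bio(bio);
            /* the caller eventually does wait_for_completion(done) */
    }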
|
| /Linux-v5.15/fs/ext4/ |
| D | readpage.c |
      64  struct bio *bio;  (member)
      70  static void __read_end_io(struct bio *bio)  (in __read_end_io(), argument)
      76  bio_for_each_segment_all(bv, bio, iter_all) {  (in __read_end_io())
      80  if (bio->bi_status || PageError(page)) {  (in __read_end_io())
      89  if (bio->bi_private)  (in __read_end_io())
      90  mempool_free(bio->bi_private, bio_post_read_ctx_pool);  (in __read_end_io())
      91  bio_put(bio);  (in __read_end_io())
      101  fscrypt_decrypt_bio(ctx->bio);  (in decrypt_work())
      110  struct bio *bio = ctx->bio;  (in verity_work(), local)
      121  bio->bi_private = NULL;  (in verity_work())
      [all …]
|
| /Linux-v5.15/fs/ |
| D | mpage.c |
      47  static void mpage_end_io(struct bio *bio)  (in mpage_end_io(), argument)
      52  bio_for_each_segment_all(bv, bio, iter_all) {  (in mpage_end_io())
      54  page_endio(page, bio_op(bio),  (in mpage_end_io())
      55  blk_status_to_errno(bio->bi_status));  (in mpage_end_io())
      58  bio_put(bio);  (in mpage_end_io())
      61  static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)  (in mpage_bio_submit(), argument)
      63  bio->bi_end_io = mpage_end_io;  (in mpage_bio_submit())
      64  bio_set_op_attrs(bio, op, op_flags);  (in mpage_bio_submit())
      65  guard_bio_eod(bio);  (in mpage_bio_submit())
      66  submit_bio(bio);  (in mpage_bio_submit())
      [all …]
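mpage_bio_submit() only has to point bi_end_io at mpage_end_io() and submit; the interesting part is the completion side, which walks every page that was added to the bio. A sketch of such a read-completion handler, assuming pages that merely need to be marked up to date and unlocked (example_ name is illustrative):

    #include <linux/bio.h>
    #include <linux/pagemap.h>

    static void example_read_end_io(struct bio *bio)
    {
            struct bio_vec *bv;
            struct bvec_iter_all iter_all;

            bio_for_each_segment_all(bv, bio, iter_all) {
                    struct page *page = bv->bv_page;

                    if (!bio->bi_status)
                            SetPageUptodate(page);
                    else
                            SetPageError(page);
                    unlock_page(page);
            }
            bio_put(bio);
    }

page_endio(), which mpage_end_io() calls, bundles essentially this logic together with the write-side end_page_writeback() handling.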
|
| /Linux-v5.15/drivers/nvme/target/ |
| D | io-cmd-bdev.c |
      170  static void nvmet_bio_done(struct bio *bio)  (in nvmet_bio_done(), argument)
      172  struct nvmet_req *req = bio->bi_private;  (in nvmet_bio_done())
      174  nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));  (in nvmet_bio_done())
      175  nvmet_req_bio_put(req, bio);  (in nvmet_bio_done())
      179  static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,  (in nvmet_bdev_alloc_bip(), argument)
      193  bip = bio_integrity_alloc(bio, GFP_NOIO,  (in nvmet_bdev_alloc_bip())
      200  bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));  (in nvmet_bdev_alloc_bip())
      202  bip_set_seed(bip, bio->bi_iter.bi_sector >>  (in nvmet_bdev_alloc_bip())
      208  rc = bio_integrity_add_page(bio, miter->page, len,  (in nvmet_bdev_alloc_bip())
      225  static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,  (in nvmet_bdev_alloc_bip(), argument)
      [all …]
|
| /Linux-v5.15/mm/ |
| D | page_io.c |
      29  void end_swap_bio_write(struct bio *bio)  (in end_swap_bio_write(), argument)
      31  struct page *page = bio_first_page_all(bio);  (in end_swap_bio_write())
      33  if (bio->bi_status) {  (in end_swap_bio_write())
      45  MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),  (in end_swap_bio_write())
      46  (unsigned long long)bio->bi_iter.bi_sector);  (in end_swap_bio_write())
      50  bio_put(bio);  (in end_swap_bio_write())
      101  static void end_swap_bio_read(struct bio *bio)  (in end_swap_bio_read(), argument)
      103  struct page *page = bio_first_page_all(bio);  (in end_swap_bio_read())
      104  struct task_struct *waiter = bio->bi_private;  (in end_swap_bio_read())
      106  if (bio->bi_status) {  (in end_swap_bio_read())
      [all …]
|
| /Linux-v5.15/drivers/md/ |
| D | dm-raid1.c |
      121  static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)  (in queue_bio(), argument)
      130  bio_list_add(bl, bio);  (in queue_bio())
      140  struct bio *bio;  (in dispatch_bios(), local)
      142  while ((bio = bio_list_pop(bio_list)))  (in dispatch_bios())
      143  queue_bio(ms, bio, WRITE);  (in dispatch_bios())
      163  static struct mirror *bio_get_m(struct bio *bio)  (in bio_get_m(), argument)
      165  return (struct mirror *) bio->bi_next;  (in bio_get_m())
      168  static void bio_set_m(struct bio *bio, struct mirror *m)  (in bio_set_m(), argument)
      170  bio->bi_next = (struct bio *) m;  (in bio_set_m())
      437  static int mirror_available(struct mirror_set *ms, struct bio *bio)  (in mirror_available(), argument)
      [all …]
|
| D | dm-bio-record.h |
      31  static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)  (in dm_bio_record(), argument)
      33  bd->bi_bdev = bio->bi_bdev;  (in dm_bio_record())
      34  bd->bi_flags = bio->bi_flags;  (in dm_bio_record())
      35  bd->bi_iter = bio->bi_iter;  (in dm_bio_record())
      36  bd->__bi_remaining = atomic_read(&bio->__bi_remaining);  (in dm_bio_record())
      37  bd->bi_end_io = bio->bi_end_io;  (in dm_bio_record())
      39  bd->bi_integrity = bio_integrity(bio);  (in dm_bio_record())
      43  static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)  (in dm_bio_restore(), argument)
      45  bio->bi_bdev = bd->bi_bdev;  (in dm_bio_restore())
      46  bio->bi_flags = bd->bi_flags;  (in dm_bio_restore())
      [all …]
|
| /Linux-v5.15/fs/f2fs/ |
| D | iostat.h |
      44  static inline void iostat_update_submit_ctx(struct bio *bio,  (in iostat_update_submit_ctx(), argument)
      47  struct bio_iostat_ctx *iostat_ctx = bio->bi_private;  (in iostat_update_submit_ctx())
      53  static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)  (in get_post_read_ctx(), argument)
      55  struct bio_iostat_ctx *iostat_ctx = bio->bi_private;  (in get_post_read_ctx())
      60  extern void iostat_update_and_unbind_ctx(struct bio *bio, int rw);
      62  struct bio *bio, struct bio_post_read_ctx *ctx);
      70  static inline void iostat_update_and_unbind_ctx(struct bio *bio, int rw) {}  (in iostat_update_and_unbind_ctx(), argument)
      72  struct bio *bio, struct bio_post_read_ctx *ctx) {}  (in iostat_alloc_and_bind_ctx(), argument)
      73  static inline void iostat_update_submit_ctx(struct bio *bio,  (in iostat_update_submit_ctx(), argument)
      75  static inline struct bio_post_read_ctx *get_post_read_ctx(struct bio *bio)  (in get_post_read_ctx(), argument)
      [all …]
|
| /Linux-v5.15/fs/crypto/ |
| D | bio.c |
      29  void fscrypt_decrypt_bio(struct bio *bio)  (in fscrypt_decrypt_bio(), argument)
      34  bio_for_each_segment_all(bv, bio, iter_all) {  (in fscrypt_decrypt_bio())
      50  struct bio *bio;  (in fscrypt_zeroout_range_inline_crypt(), local)
      55  bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);  (in fscrypt_zeroout_range_inline_crypt())
      62  fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);  (in fscrypt_zeroout_range_inline_crypt())
      63  bio_set_dev(bio, inode->i_sb->s_bdev);  (in fscrypt_zeroout_range_inline_crypt())
      64  bio->bi_iter.bi_sector =  (in fscrypt_zeroout_range_inline_crypt())
      66  bio_set_op_attrs(bio, REQ_OP_WRITE, 0);  (in fscrypt_zeroout_range_inline_crypt())
      68  ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);  (in fscrypt_zeroout_range_inline_crypt())
      78  !fscrypt_mergeable_bio(bio, inode, lblk)) {  (in fscrypt_zeroout_range_inline_crypt())
      [all …]
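fscrypt_zeroout_range_inline_crypt() zeroes a block range by repeatedly adding the shared zero page to a WRITE bio, submitting and re-allocating whenever the bio fills up or the crypt context stops being mergeable. A simplified sketch of the underlying zero-page idiom, with the chunking loop and error handling trimmed (example_ name is illustrative; outside fscrypt, blkdev_issue_zeroout() is the usual entry point):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/mm.h>

    static int example_zero_range(struct block_device *bdev, sector_t sector,
                                  unsigned int nr_blocks, unsigned int blocksize)
    {
            struct bio *bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
            int err;

            bio_set_dev(bio, bdev);
            bio->bi_iter.bi_sector = sector;
            bio->bi_opf = REQ_OP_WRITE;

            while (nr_blocks--) {
                    if (bio_add_page(bio, ZERO_PAGE(0), blocksize, 0) != blocksize)
                            break;   /* bio full: a real caller would submit and continue */
            }

            err = submit_bio_wait(bio);
            bio_put(bio);
            return err;
    }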
|