
Searched refs:bio (results 1–25 of 302), sorted by relevance


/Linux-v5.4/include/linux/bio.h
  26  #define bio_prio(bio)			(bio)->bi_ioprio
  27  #define bio_set_prio(bio, prio)	((bio)->bi_ioprio = prio)
  29  #define bio_iter_iovec(bio, iter) \
  30  	bvec_iter_bvec((bio)->bi_io_vec, (iter))
  32  #define bio_iter_page(bio, iter) \
  33  	bvec_iter_page((bio)->bi_io_vec, (iter))
  34  #define bio_iter_len(bio, iter) \
  35  	bvec_iter_len((bio)->bi_io_vec, (iter))
  36  #define bio_iter_offset(bio, iter) \
  37  	bvec_iter_offset((bio)->bi_io_vec, (iter))
[all …]
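The bio.h hits above are the accessor and iterator macros built on bvec_iter. A minimal sketch of how they are usually consumed, via the bio_for_each_segment() wrapper (count_bio_bytes is a hypothetical name; the API is v5.4):

#include <linux/bio.h>

/* Walk every data segment of a bio without touching bio->bi_iter itself. */
static unsigned int count_bio_bytes(struct bio *bio)
{
	struct bio_vec bv;	/* by-value copy of the current segment */
	struct bvec_iter iter;	/* private cursor over bi_io_vec */
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;	/* bv.bv_page and bv.bv_offset are also valid here */

	return bytes;
}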
/Linux-v5.4/block/bio.c
   64  	unsigned int sz = sizeof(struct bio) + extra_size;  in bio_find_or_create_slab()
  233  void bio_uninit(struct bio *bio)  in bio_uninit()
  235  	bio_disassociate_blkg(bio);  in bio_uninit()
  239  static void bio_free(struct bio *bio)  in bio_free()
  241  	struct bio_set *bs = bio->bi_pool;  in bio_free()
  244  	bio_uninit(bio);  in bio_free()
  247  	bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));  in bio_free()
  252  	p = bio;  in bio_free()
  258  	kfree(bio);  in bio_free()
  267  void bio_init(struct bio *bio, struct bio_vec *table,  in bio_init()
[all …]
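bio.c owns the allocate/initialize/free lifecycle shown above; callers only ever take and drop references. A hedged sketch of the caller-side pattern, assuming the v5.4 API (read_one_page is a hypothetical name):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int read_one_page(struct block_device *bdev, sector_t sector,
			 struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 1);		/* one inline bio_vec from fs_bio_set */
	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;	/* 512-byte sectors */
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);	/* synchronous; async callers set bi_end_io instead */
	bio_put(bio);			/* drops the last reference, ending in bio_free() above */
	return ret;
}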
/Linux-v5.4/block/blk-map.c
  18  int blk_rq_append_bio(struct request *rq, struct bio **bio)  in blk_rq_append_bio()
  20  	struct bio *orig_bio = *bio;  in blk_rq_append_bio()
  25  	blk_queue_bounce(rq->q, bio);  in blk_rq_append_bio()
  27  	bio_for_each_bvec(bv, *bio, iter)  in blk_rq_append_bio()
  30  	if (!rq->bio) {  in blk_rq_append_bio()
  31  		blk_rq_bio_prep(rq, *bio, nr_segs);  in blk_rq_append_bio()
  33  	if (!ll_back_merge_fn(rq, *bio, nr_segs)) {  in blk_rq_append_bio()
  34  		if (orig_bio != *bio) {  in blk_rq_append_bio()
  35  			bio_put(*bio);  in blk_rq_append_bio()
  36  			*bio = orig_bio;  in blk_rq_append_bio()
[all …]
/Linux-v5.4/block/blk-lib.c
  13  struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)  in blk_next_bio()
  15  	struct bio *new = bio_alloc(gfp, nr_pages);  in blk_next_bio()
  17  	if (bio) {  in blk_next_bio()
  18  		bio_chain(bio, new);  in blk_next_bio()
  19  		submit_bio(bio);  in blk_next_bio()
  27  		struct bio **biop)  in __blkdev_issue_discard()
  30  	struct bio *bio = *biop;  in __blkdev_issue_discard()
  63  	bio = blk_next_bio(bio, 0, gfp_mask);  in __blkdev_issue_discard()
  64  	bio->bi_iter.bi_sector = sector;  in __blkdev_issue_discard()
  65  	bio_set_dev(bio, bdev);  in __blkdev_issue_discard()
[all …]
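blk_next_bio() is the chaining helper behind __blkdev_issue_discard(): each full bio is chained to its successor and submitted, so only the final bio has to be waited on. Callers never see that detail; a sketch of the public entry point (discard_range is a hypothetical wrapper, the call itself is v5.4):

#include <linux/blkdev.h>

static int discard_range(struct block_device *bdev, sector_t start,
			 sector_t nr_sects)
{
	/* Splits the range into chained discard bios and waits for all of them. */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
}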
/Linux-v5.4/block/bounce.c
  132  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)  in copy_to_high_bio_irq()
  162  static void bounce_end_io(struct bio *bio, mempool_t *pool)  in bounce_end_io()
  164  	struct bio *bio_orig = bio->bi_private;  in bounce_end_io()
  172  	bio_for_each_segment_all(bvec, bio, iter_all) {  in bounce_end_io()
  181  	bio_orig->bi_status = bio->bi_status;  in bounce_end_io()
  183  	bio_put(bio);  in bounce_end_io()
  186  static void bounce_end_io_write(struct bio *bio)  in bounce_end_io_write()
  188  	bounce_end_io(bio, &page_pool);  in bounce_end_io_write()
  191  static void bounce_end_io_write_isa(struct bio *bio)  in bounce_end_io_write_isa()
  194  	bounce_end_io(bio, &isa_page_pool);  in bounce_end_io_write_isa()
[all …]
/Linux-v5.4/block/bio-integrity.c
   37  struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,  in bio_integrity_alloc()
   42  	struct bio_set *bs = bio->bi_pool;  in bio_integrity_alloc()
   72  	bip->bip_bio = bio;  in bio_integrity_alloc()
   73  	bio->bi_integrity = bip;  in bio_integrity_alloc()
   74  	bio->bi_opf |= REQ_INTEGRITY;  in bio_integrity_alloc()
   90  static void bio_integrity_free(struct bio *bio)  in bio_integrity_free()
   92  	struct bio_integrity_payload *bip = bio_integrity(bio);  in bio_integrity_free()
   93  	struct bio_set *bs = bio->bi_pool;  in bio_integrity_free()
  107  	bio->bi_integrity = NULL;  in bio_integrity_free()
  108  	bio->bi_opf &= ~REQ_INTEGRITY;  in bio_integrity_free()
[all …]
/Linux-v5.4/block/blk-core.c
  229  static void req_bio_endio(struct request *rq, struct bio *bio,  in req_bio_endio()
  233  	bio->bi_status = error;  in req_bio_endio()
  236  	bio_set_flag(bio, BIO_QUIET);  in req_bio_endio()
  238  	bio_advance(bio, nbytes);  in req_bio_endio()
  241  	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))  in req_bio_endio()
  242  		bio_endio(bio);  in req_bio_endio()
  255  		rq->bio, rq->biotail, blk_rq_bytes(rq));  in blk_dump_rq_flags()
  599  bool bio_attempt_back_merge(struct request *req, struct bio *bio,  in bio_attempt_back_merge()
  602  	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;  in bio_attempt_back_merge()
  604  	if (!ll_back_merge_fn(req, bio, nr_segs))  in bio_attempt_back_merge()
[all …]
/Linux-v5.4/block/blk-merge.c
  16  		struct request *prev_rq, struct bio *prev, struct bio *next)  in bio_will_gap()
  29  	bio_get_first_bvec(prev_rq->bio, &pb);  in bio_will_gap()
  51  static inline bool req_gap_back_merge(struct request *req, struct bio *bio)  in req_gap_back_merge()
  53  	return bio_will_gap(req->q, req, req->biotail, bio);  in req_gap_back_merge()
  56  static inline bool req_gap_front_merge(struct request *req, struct bio *bio)  in req_gap_front_merge()
  58  	return bio_will_gap(req->q, NULL, bio, req->bio);  in req_gap_front_merge()
  61  static struct bio *blk_bio_discard_split(struct request_queue *q,  in blk_bio_discard_split()
  62  		struct bio *bio,  in blk_bio_discard_split()
  85  	if (bio_sectors(bio) <= max_discard_sectors)  in blk_bio_discard_split()
  96  	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;  in blk_bio_discard_split()
[all …]
/Linux-v5.4/block/blk.h
  109  static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,  in blk_rq_bio_prep()
  113  	rq->__data_len = bio->bi_iter.bi_size;  in blk_rq_bio_prep()
  114  	rq->bio = rq->biotail = bio;  in blk_rq_bio_prep()
  115  	rq->ioprio = bio_prio(bio);  in blk_rq_bio_prep()
  117  	if (bio->bi_disk)  in blk_rq_bio_prep()
  118  		rq->rq_disk = bio->bi_disk;  in blk_rq_bio_prep()
  123  bool __bio_integrity_endio(struct bio *);
  124  static inline bool bio_integrity_endio(struct bio *bio)  in bio_integrity_endio()
  126  	if (bio_integrity(bio))  in bio_integrity_endio()
  127  		return __bio_integrity_endio(bio);  in bio_integrity_endio()
[all …]
/Linux-v5.4/block/blk-rq-qos.h
   37  	void (*throttle)(struct rq_qos *, struct bio *);
   38  	void (*track)(struct rq_qos *, struct request *, struct bio *);
   39  	void (*merge)(struct rq_qos *, struct request *, struct bio *);
   43  	void (*done_bio)(struct rq_qos *, struct bio *);
   44  	void (*cleanup)(struct rq_qos *, struct bio *);
  134  void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
  138  void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
  139  void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
  140  void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
  141  void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
[all …]
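These hits are the hook table that per-queue QoS policies (wbt, blk-iolatency, blk-iocost in this tree) fill in; the __rq_qos_*() walkers skip callbacks left NULL. A hedged sketch of a do-nothing policy's table, assuming the v5.4 layout (the noop_* names are hypothetical):

static void noop_throttle(struct rq_qos *rqos, struct bio *bio)
{
	/* a real policy would block here when the bio exceeds its budget */
}

static struct rq_qos_ops noop_rqos_ops = {
	.throttle = noop_throttle,
	/* .track, .merge, .done_bio, .cleanup left NULL and skipped by the walkers */
};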
/Linux-v5.4/drivers/md/bcache/request.c
   40  static void bio_csum(struct bio *bio, struct bkey *k)  in bio_csum()
   46  	bio_for_each_segment(bv, bio, iter) {  in bio_csum()
  123  	struct bio *bio = op->bio;  in bch_data_invalidate()
  126  		bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  in bch_data_invalidate()
  128  	while (bio_sectors(bio)) {  in bch_data_invalidate()
  129  		unsigned int sectors = min(bio_sectors(bio),  in bch_data_invalidate()
  135  		bio->bi_iter.bi_sector += sectors;  in bch_data_invalidate()
  136  		bio->bi_iter.bi_size -= sectors << 9;  in bch_data_invalidate()
  140  			bio->bi_iter.bi_sector,  in bch_data_invalidate()
  146  	bio_put(bio);  in bch_data_invalidate()
[all …]
/Linux-v5.4/drivers/md/bcache/io.c
  17  void bch_bbio_free(struct bio *bio, struct cache_set *c)  in bch_bbio_free()
  19  	struct bbio *b = container_of(bio, struct bbio, bio);  in bch_bbio_free()
  24  struct bio *bch_bbio_alloc(struct cache_set *c)  in bch_bbio_alloc()
  27  	struct bio *bio = &b->bio;  in bch_bbio_alloc()
  29  	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));  in bch_bbio_alloc()
  31  	return bio;  in bch_bbio_alloc()
  34  void __bch_submit_bbio(struct bio *bio, struct cache_set *c)  in __bch_submit_bbio()
  36  	struct bbio *b = container_of(bio, struct bbio, bio);  in __bch_submit_bbio()
  38  	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);  in __bch_submit_bbio()
  39  	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);  in __bch_submit_bbio()
[all …]
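struct bbio embeds a struct bio (plus its inline bio_vecs) at the tail of a private structure and recovers the container via container_of() in completion paths. A generic sketch of the same pattern, with hypothetical my_io names (v5.4: bi_inline_vecs is a flexible array at the end of struct bio):

#include <linux/bio.h>
#include <linux/slab.h>

struct my_io {
	void		*private_state;
	struct bio	bio;	/* must be last: bi_inline_vecs[] extends past it */
};

static struct my_io *my_io_alloc(unsigned int nr_vecs)
{
	struct my_io *io = kmalloc(sizeof(*io) +
				   nr_vecs * sizeof(struct bio_vec), GFP_KERNEL);

	if (io)
		bio_init(&io->bio, io->bio.bi_inline_vecs, nr_vecs);
	return io;
}

static void my_endio(struct bio *bio)
{
	struct my_io *io = container_of(bio, struct my_io, bio);

	/* io->private_state is reachable again from a bare bio pointer */
	kfree(io);
}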
/Linux-v5.4/drivers/md/bcache/movinggc.c
  19  	struct bbio bio;
  48  	struct bio *bio = &io->bio.bio;  in write_moving_finish()
  50  	bio_free_pages(bio);  in write_moving_finish()
  62  static void read_moving_endio(struct bio *bio)  in read_moving_endio()
  64  	struct bbio *b = container_of(bio, struct bbio, bio);  in read_moving_endio()
  65  	struct moving_io *io = container_of(bio->bi_private,  in read_moving_endio()
  68  	if (bio->bi_status)  in read_moving_endio()
  69  		io->op.status = bio->bi_status;  in read_moving_endio()
  75  	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");  in read_moving_endio()
  80  	struct bio *bio = &io->bio.bio;  in moving_init()
[all …]
/Linux-v5.4/fs/crypto/bio.c
  29  static void __fscrypt_decrypt_bio(struct bio *bio, bool done)  in __fscrypt_decrypt_bio()
  34  	bio_for_each_segment_all(bv, bio, iter_all) {  in __fscrypt_decrypt_bio()
  47  void fscrypt_decrypt_bio(struct bio *bio)  in fscrypt_decrypt_bio()
  49  	__fscrypt_decrypt_bio(bio, false);  in fscrypt_decrypt_bio()
  56  	struct bio *bio = ctx->bio;  in completion_pages()
  58  	__fscrypt_decrypt_bio(bio, true);  in completion_pages()
  60  	bio_put(bio);  in completion_pages()
  63  void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)  in fscrypt_enqueue_decrypt_bio()
  66  	ctx->bio = bio;  in fscrypt_enqueue_decrypt_bio()
  77  	struct bio *bio;  in fscrypt_zeroout_range()
[all …]
/Linux-v5.4/fs/ext4/readpage.c
   63  	struct bio *bio;
   69  static void __read_end_io(struct bio *bio)  in __read_end_io()
   75  	bio_for_each_segment_all(bv, bio, iter_all) {  in __read_end_io()
   79  		if (bio->bi_status || PageError(page)) {  in __read_end_io()
   88  	if (bio->bi_private)  in __read_end_io()
   89  		mempool_free(bio->bi_private, bio_post_read_ctx_pool);  in __read_end_io()
   90  	bio_put(bio);  in __read_end_io()
  100  	fscrypt_decrypt_bio(ctx->bio);  in decrypt_work()
  110  	fsverity_verify_bio(ctx->bio);  in verity_work()
  140  	__read_end_io(ctx->bio);  in bio_post_read_processing()
[all …]
/Linux-v5.4/fs/mpage.c
  47  static void mpage_end_io(struct bio *bio)  in mpage_end_io()
  52  	bio_for_each_segment_all(bv, bio, iter_all) {  in mpage_end_io()
  54  		page_endio(page, bio_op(bio),  in mpage_end_io()
  55  			   blk_status_to_errno(bio->bi_status));  in mpage_end_io()
  58  	bio_put(bio);  in mpage_end_io()
  61  static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)  in mpage_bio_submit()
  63  	bio->bi_end_io = mpage_end_io;  in mpage_bio_submit()
  64  	bio_set_op_attrs(bio, op, op_flags);  in mpage_bio_submit()
  65  	guard_bio_eod(op, bio);  in mpage_bio_submit()
  66  	submit_bio(bio);  in mpage_bio_submit()
[all …]
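mpage_end_io() shows the completion-side walk: bio_for_each_segment_all() visits every page that was added to the bio, independent of how far bi_iter advanced during I/O. A sketch of the same shape for a read completion (my_read_end_io is a hypothetical name; the iterator form matches v5.4):

#include <linux/bio.h>
#include <linux/pagemap.h>

static void my_read_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (bio->bi_status)
			SetPageError(page);
		else
			SetPageUptodate(page);
		unlock_page(page);	/* readers sleeping in lock_page() resume */
	}
	bio_put(bio);
}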
/Linux-v5.4/drivers/nvme/target/io-cmd-bdev.c
  136  static void nvmet_bio_done(struct bio *bio)  in nvmet_bio_done()
  138  	struct nvmet_req *req = bio->bi_private;  in nvmet_bio_done()
  140  	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));  in nvmet_bio_done()
  141  	if (bio != &req->b.inline_bio)  in nvmet_bio_done()
  142  		bio_put(bio);  in nvmet_bio_done()
  148  	struct bio *bio;  in nvmet_bdev_execute_rw()
  174  		bio = &req->b.inline_bio;  in nvmet_bdev_execute_rw()
  175  		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));  in nvmet_bdev_execute_rw()
  177  		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));  in nvmet_bdev_execute_rw()
  179  	bio_set_dev(bio, req->ns->bdev);  in nvmet_bdev_execute_rw()
[all …]
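nvmet_bdev_execute_rw() avoids an allocation on small requests by reusing a bio and bvec array embedded in the request; larger transfers fall back to bio_alloc(). A sketch of that inline-bio fallback with hypothetical my_req names (MY_INLINE_VECS is an assumed sizing):

#include <linux/bio.h>
#include <linux/kernel.h>

#define MY_INLINE_VECS	8	/* assumption: tune to the common request size */

struct my_req {
	struct bio	inline_bio;
	struct bio_vec	inline_bvec[MY_INLINE_VECS];
};

static struct bio *my_req_bio(struct my_req *req, unsigned int nr_vecs)
{
	struct bio *bio;

	if (nr_vecs <= MY_INLINE_VECS) {
		bio = &req->inline_bio;	/* hot path: no allocation at all */
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL,
				min_t(unsigned int, nr_vecs, BIO_MAX_PAGES));
	}
	return bio;
}

/* Completion must mirror the choice, exactly as nvmet_bio_done() does above. */
static void my_req_put_bio(struct my_req *req, struct bio *bio)
{
	if (bio != &req->inline_bio)
		bio_put(bio);
}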
/Linux-v5.4/mm/page_io.c
  29  static struct bio *get_swap_bio(gfp_t gfp_flags,  in get_swap_bio()
  32  	struct bio *bio;  in get_swap_bio()
  34  	bio = bio_alloc(gfp_flags, 1);  in get_swap_bio()
  35  	if (bio) {  in get_swap_bio()
  38  		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);  in get_swap_bio()
  39  		bio_set_dev(bio, bdev);  in get_swap_bio()
  40  		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;  in get_swap_bio()
  41  		bio->bi_end_io = end_io;  in get_swap_bio()
  43  		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);  in get_swap_bio()
  45  	return bio;  in get_swap_bio()
[all …]
/Linux-v5.4/include/trace/events/block.h
  227  	TP_PROTO(struct request_queue *q, struct bio *bio),
  229  	TP_ARGS(q, bio),
  240  		__entry->dev = bio_dev(bio);
  241  		__entry->sector = bio->bi_iter.bi_sector;
  242  		__entry->nr_sector = bio_sectors(bio);
  243  		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
  264  	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
  266  	TP_ARGS(q, bio, error),
  277  		__entry->dev = bio_dev(bio);
  278  		__entry->sector = bio->bi_iter.bi_sector;
[all …]
/Linux-v5.4/include/trace/events/bcache.h
  11  	TP_PROTO(struct bcache_device *d, struct bio *bio),
  12  	TP_ARGS(d, bio),
  25  		__entry->dev = bio_dev(bio);
  28  		__entry->sector = bio->bi_iter.bi_sector;
  29  		__entry->orig_sector = bio->bi_iter.bi_sector - 16;
  30  		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
  31  		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
  81  	TP_PROTO(struct bcache_device *d, struct bio *bio),
  82  	TP_ARGS(d, bio)
  86  	TP_PROTO(struct bcache_device *d, struct bio *bio),
[all …]
/Linux-v5.4/drivers/md/dm-zoned-target.c
   22  	struct bio *bio;
   77  static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)  in dmz_bio_endio()
   79  	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));  in dmz_bio_endio()
   81  	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)  in dmz_bio_endio()
   82  		bio->bi_status = status;  in dmz_bio_endio()
   88  	if (bio->bi_status != BLK_STS_OK &&  in dmz_bio_endio()
   89  	    bio_op(bio) == REQ_OP_WRITE &&  in dmz_bio_endio()
   94  	bio_endio(bio);  in dmz_bio_endio()
  102  static void dmz_clone_endio(struct bio *clone)  in dmz_clone_endio()
  108  	dmz_bio_endio(bioctx->bio, status);  in dmz_clone_endio()
[all …]
/Linux-v5.4/drivers/md/dm-raid1.c
  121  static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)  in queue_bio()
  130  	bio_list_add(bl, bio);  in queue_bio()
  140  	struct bio *bio;  in dispatch_bios()
  142  	while ((bio = bio_list_pop(bio_list)))  in dispatch_bios()
  143  		queue_bio(ms, bio, WRITE);  in dispatch_bios()
  163  static struct mirror *bio_get_m(struct bio *bio)  in bio_get_m()
  165  	return (struct mirror *) bio->bi_next;  in bio_get_m()
  168  static void bio_set_m(struct bio *bio, struct mirror *m)  in bio_set_m()
  170  	bio->bi_next = (struct bio *) m;  in bio_set_m()
  437  static int mirror_available(struct mirror_set *ms, struct bio *bio)  in mirror_available()
[all …]
/Linux-v5.4/drivers/md/dm-thin.c
  223  typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
  381  	struct bio *parent_bio;
  382  	struct bio *bio;
  385  static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)  in begin_discard()
  392  	op->bio = NULL;  in begin_discard()
  402  		GFP_NOWAIT, 0, &op->bio);  in issue_discard()
  407  	if (op->bio) {  in end_discard()
  412  		bio_chain(op->bio, op->parent_bio);  in end_discard()
  413  		bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);  in end_discard()
  414  		submit_bio(op->bio);  in end_discard()
[all …]
/Linux-v5.4/fs/xfs/xfs_bio_io.c
  24  	struct bio *bio;  in xfs_rw_bdev()
  29  	bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));  in xfs_rw_bdev()
  30  	bio_set_dev(bio, bdev);  in xfs_rw_bdev()
  31  	bio->bi_iter.bi_sector = sector;  in xfs_rw_bdev()
  32  	bio->bi_opf = op | REQ_META | REQ_SYNC;  in xfs_rw_bdev()
  39  	while (bio_add_page(bio, page, len, off) != len) {  in xfs_rw_bdev()
  40  		struct bio *prev = bio;  in xfs_rw_bdev()
  42  		bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));  in xfs_rw_bdev()
  43  		bio_copy_dev(bio, prev);  in xfs_rw_bdev()
  44  		bio->bi_iter.bi_sector = bio_end_sector(prev);  in xfs_rw_bdev()
[all …]
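xfs_rw_bdev() handles the case where bio_add_page() cannot take a whole page: it chains a fresh bio onto the full one, submits the full one, and waits once at the end. A self-contained sketch of that loop (write_pages_sync and its arguments are hypothetical; the bio calls are v5.4):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int write_pages_sync(struct block_device *bdev, sector_t sector,
			    struct page **pages, unsigned int nr_pages)
{
	/* GFP_KERNEL allocations from fs_bio_set are documented not to fail */
	struct bio *bio = bio_alloc(GFP_KERNEL,
				    min_t(unsigned int, nr_pages, BIO_MAX_PAGES));
	unsigned int i;
	int error;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;

	for (i = 0; i < nr_pages; i++) {
		while (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
			struct bio *prev = bio;

			/* current bio is full: start a new one right after it */
			bio = bio_alloc(GFP_KERNEL,
					min_t(unsigned int, nr_pages - i,
					      BIO_MAX_PAGES));
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio->bi_opf = prev->bi_opf;

			bio_chain(prev, bio);	/* prev completes into bio */
			submit_bio(prev);
		}
	}

	error = submit_bio_wait(bio);	/* returns only after the whole chain */
	bio_put(bio);
	return error;
}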
/Linux-v5.4/fs/erofs/data.c
   12  static void erofs_readendio(struct bio *bio)  in erofs_readendio()
   15  	blk_status_t err = bio->bi_status;  in erofs_readendio()
   18  	bio_for_each_segment_all(bvec, bio, iter_all) {  in erofs_readendio()
   32  	bio_put(bio);  in erofs_readendio()
  127  static inline struct bio *erofs_read_raw_page(struct bio *bio,  in erofs_read_raw_page()
  147  	if (bio &&  in erofs_read_raw_page()
  151  		submit_bio(bio);  in erofs_read_raw_page()
  152  		bio = NULL;  in erofs_read_raw_page()
  155  	if (!bio) {  in erofs_read_raw_page()
  221  		bio = bio_alloc(GFP_NOIO, nblocks);  in erofs_read_raw_page()
[all …]
