Lines matching refs: bio

162 static void nvmet_bio_done(struct bio *bio)  in nvmet_bio_done()  argument
164 struct nvmet_req *req = bio->bi_private; in nvmet_bio_done()
166 nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); in nvmet_bio_done()
167 if (bio != &req->b.inline_bio) in nvmet_bio_done()
168 bio_put(bio); in nvmet_bio_done()
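
The fragments above are the shared completion callback: nvmet_bio_done() recovers the target request from bio->bi_private, maps the block-layer result in bio->bi_status to an NVMe status with blk_to_nvme_status(), completes the request, and frees the bio only when it is not the inline bio embedded in struct nvmet_req (the inline bio is set up with bio_init() rather than bio_alloc(), so it must not be bio_put()). Reassembled from the listed lines, with braces and the blank line reconstructed, the callback reads roughly:

        static void nvmet_bio_done(struct bio *bio)
        {
                struct nvmet_req *req = bio->bi_private;

                nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
                if (bio != &req->b.inline_bio)
                        bio_put(bio);
        }
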
172 static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, in nvmet_bdev_alloc_bip() argument
187 bip = bio_integrity_alloc(bio, GFP_NOIO, in nvmet_bdev_alloc_bip()
194 bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); in nvmet_bdev_alloc_bip()
196 bip_set_seed(bip, bio->bi_iter.bi_sector >> in nvmet_bdev_alloc_bip()
202 rc = bio_integrity_add_page(bio, miter->page, len, in nvmet_bdev_alloc_bip()
219 static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, in nvmet_bdev_alloc_bip() argument
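
nvmet_bdev_alloc_bip() attaches a block integrity payload (bip) when the namespace carries per-block metadata: bio_integrity_alloc() allocates the bip, bio_integrity_bytes() sizes it for the data sectors the bio covers, bip_set_seed() seeds the reference tag from the starting sector, and bio_integrity_add_page() maps the metadata pages walked by the sg_mapping_iter. The second nvmet_bdev_alloc_bip() prototype in the listing is presumably the fallback compiled when CONFIG_BLK_DEV_INTEGRITY is disabled; a minimal sketch of such a stub (an assumption, its body is not shown in the listing) would be:

        static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
                        struct sg_mapping_iter *miter)
        {
                /* without blk-integrity support, metadata-bearing I/O cannot be built */
                return -EINVAL;
        }
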
229 struct bio *bio; in nvmet_bdev_execute_rw() local
263 bio = &req->b.inline_bio; in nvmet_bdev_execute_rw()
264 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); in nvmet_bdev_execute_rw()
266 bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); in nvmet_bdev_execute_rw()
268 bio_set_dev(bio, req->ns->bdev); in nvmet_bdev_execute_rw()
269 bio->bi_iter.bi_sector = sector; in nvmet_bdev_execute_rw()
270 bio->bi_private = req; in nvmet_bdev_execute_rw()
271 bio->bi_end_io = nvmet_bio_done; in nvmet_bdev_execute_rw()
272 bio->bi_opf = op; in nvmet_bdev_execute_rw()
280 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) in nvmet_bdev_execute_rw()
282 struct bio *prev = bio; in nvmet_bdev_execute_rw()
285 rc = nvmet_bdev_alloc_bip(req, bio, in nvmet_bdev_execute_rw()
288 bio_io_error(bio); in nvmet_bdev_execute_rw()
293 bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); in nvmet_bdev_execute_rw()
294 bio_set_dev(bio, req->ns->bdev); in nvmet_bdev_execute_rw()
295 bio->bi_iter.bi_sector = sector; in nvmet_bdev_execute_rw()
296 bio->bi_opf = op; in nvmet_bdev_execute_rw()
298 bio_chain(bio, prev); in nvmet_bdev_execute_rw()
307 rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter); in nvmet_bdev_execute_rw()
309 bio_io_error(bio); in nvmet_bdev_execute_rw()
314 submit_bio(bio); in nvmet_bdev_execute_rw()
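
nvmet_bdev_execute_rw() builds the data path. Small transfers reuse the inline bio embedded in the request (bio_init() over req->inline_bvec); larger ones get a bio sized by min(sg_cnt, BIO_MAX_PAGES). Only the first bio has bi_private and bi_end_io set. The request's scatterlist is then fed to bio_add_page(); whenever an element no longer fits, a fresh bio is allocated, chained to the previous one with bio_chain(), and the full one is submitted, with nvmet_bdev_alloc_bip() attaching the integrity payload first when metadata is present. A condensed sketch of that chaining loop, with declarations, the metadata branch, and error handling elided (the for_each_sg() walk is an assumption; only the bio calls appear verbatim in the listing):

        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        struct bio *prev = bio;

                        /* current bio is full: chain a new one and submit the old one */
                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
                        bio_set_dev(bio, req->ns->bdev);
                        bio->bi_iter.bi_sector = sector;
                        bio->bi_opf = op;
                        bio_chain(bio, prev);
                        submit_bio(prev);
                }
                sector += sg->length >> 9;
                sg_cnt--;
        }
        submit_bio(bio);        /* last bio of the chain; completion funnels back to the first bio's nvmet_bio_done() */
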
320 struct bio *bio = &req->b.inline_bio; in nvmet_bdev_execute_flush() local
325 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); in nvmet_bdev_execute_flush()
326 bio_set_dev(bio, req->ns->bdev); in nvmet_bdev_execute_flush()
327 bio->bi_private = req; in nvmet_bdev_execute_flush()
328 bio->bi_end_io = nvmet_bio_done; in nvmet_bdev_execute_flush()
329 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; in nvmet_bdev_execute_flush()
331 submit_bio(bio); in nvmet_bdev_execute_flush()
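
The flush handler is the simplest user of the inline bio: it initializes req->b.inline_bio over inline_bvec, points it at the namespace's block device, wires bi_private and bi_end_io to the same nvmet_bio_done() completion, sets REQ_OP_WRITE | REQ_PREFLUSH so the bio carries no data and only forces the device's volatile write cache, and submits it.
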
342 struct nvme_dsm_range *range, struct bio **bio) in nvmet_bdev_discard_range() argument
350 GFP_KERNEL, 0, bio); in nvmet_bdev_discard_range()
361 struct bio *bio = NULL; in nvmet_bdev_execute_discard() local
371 status = nvmet_bdev_discard_range(req, &range, &bio); in nvmet_bdev_execute_discard()
376 if (bio) { in nvmet_bdev_execute_discard()
377 bio->bi_private = req; in nvmet_bdev_execute_discard()
378 bio->bi_end_io = nvmet_bio_done; in nvmet_bdev_execute_discard()
380 bio_io_error(bio); in nvmet_bdev_execute_discard()
382 submit_bio(bio); in nvmet_bdev_execute_discard()
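
For DSM deallocate, nvmet_bdev_discard_range() converts one struct nvme_dsm_range into a sector range and appends discard bios to a chain through its struct bio ** argument; the GFP_KERNEL, 0, bio trailer visible in the listing is consistent with a call to __blkdev_issue_discard(), which builds bios without submitting them. nvmet_bdev_execute_discard() iterates the ranges and, if any bios were built, attaches the completion and either submits the chain or fails it with bio_io_error(). A hedged sketch of that submit-once pattern (the range-copy step and the loop bound are assumptions not shown in the listing):

        struct nvme_dsm_range range;
        struct bio *bio = NULL;
        u16 status;
        int i;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                /* fetch range i from the command payload (elided), then: */
                status = nvmet_bdev_discard_range(req, &range, &bio);
                if (status)
                        break;
        }

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status)
                        bio_io_error(bio);      /* fail the partially built chain */
                else
                        submit_bio(bio);
        } else {
                nvmet_req_complete(req, status);
        }
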
409 struct bio *bio = NULL; in nvmet_bdev_execute_write_zeroes() local
423 GFP_KERNEL, &bio, 0); in nvmet_bdev_execute_write_zeroes()
424 if (bio) { in nvmet_bdev_execute_write_zeroes()
425 bio->bi_private = req; in nvmet_bdev_execute_write_zeroes()
426 bio->bi_end_io = nvmet_bio_done; in nvmet_bdev_execute_write_zeroes()
427 submit_bio(bio); in nvmet_bdev_execute_write_zeroes()
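
Write Zeroes follows the same shape: the GFP_KERNEL, &bio, 0 trailer matches the argument order of __blkdev_issue_zeroout() (an inference from the visible arguments, not shown verbatim in the listing), which likewise accumulates bios into *bio without submitting them; the handler then sets bi_private and bi_end_io and calls submit_bio(), so completion again lands in nvmet_bio_done().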