Lines Matching refs:bio

236 static void req_bio_endio(struct request *rq, struct bio *bio,  in req_bio_endio()  argument
240 bio->bi_status = error; in req_bio_endio()
243 bio_set_flag(bio, BIO_QUIET); in req_bio_endio()
245 bio_advance(bio, nbytes); in req_bio_endio()
252 if (bio->bi_iter.bi_size) in req_bio_endio()
253 bio->bi_status = BLK_STS_IOERR; in req_bio_endio()
255 bio->bi_iter.bi_sector = rq->__sector; in req_bio_endio()
259 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) in req_bio_endio()
260 bio_endio(bio); in req_bio_endio()
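
The matches above are from req_bio_endio(), the helper that propagates a request's completion into each attached bio: it records the error in bi_status, honours RQF_QUIET by setting BIO_QUIET, advances the bio past the completed bytes, and calls bio_endio() once nothing is left (unless the request is part of a flush sequence, RQF_FLUSH_SEQ). The listing appears to come from block/blk-core.c of a kernel around v5.14/v5.15 (submit_bio() still returns blk_qc_t and REQ_OP_WRITE_SAME still exists), and the sketches added below assume that era's APIs. A minimal, hypothetical version of the per-bio completion pattern:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: complete @nbytes of @bio with status @error. */
static void complete_bio_bytes(struct bio *bio, unsigned int nbytes,
                               blk_status_t error)
{
        if (error)
                bio->bi_status = error;

        bio_advance(bio, nbytes);       /* consume the finished bytes */

        /* Only signal completion once the whole bio has been processed. */
        if (!bio->bi_iter.bi_size)
                bio_endio(bio);
}
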
273 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
471 static inline int bio_queue_enter(struct bio *bio) in bio_queue_enter() argument
473 struct gendisk *disk = bio->bi_bdev->bd_disk; in bio_queue_enter()
477 if (bio->bi_opf & REQ_NOWAIT) { in bio_queue_enter()
480 bio_wouldblock_error(bio); in bio_queue_enter()
502 bio_io_error(bio); in bio_queue_enter()
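
bio_queue_enter() pins the request queue before a bio is processed. When the bio carries REQ_NOWAIT and the queue cannot be entered without sleeping, the bio is failed immediately through bio_wouldblock_error(), i.e. completed with BLK_STS_AGAIN, instead of blocking; other entry failures end in bio_io_error(). A hedged sketch of how a submitter opts in (the helper name is illustrative); callers normally watch for BLK_STS_AGAIN in their bi_end_io handler and fall back to a blocking resubmission:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative: ask the block layer not to sleep on queue entry.
 * If the queue is not ready, the bio completes straight away with
 * bio->bi_status == BLK_STS_AGAIN (see bio_wouldblock_error()).
 */
static void submit_bio_nowait(struct bio *bio)
{
        bio->bi_opf |= REQ_NOWAIT;
        submit_bio(bio);
}
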
653 static void handle_bad_sector(struct bio *bio, sector_t maxsector) in handle_bad_sector() argument
659 bio_devname(bio, b), bio->bi_opf, in handle_bad_sector()
660 bio_end_sector(bio), maxsector); in handle_bad_sector()
698 static inline bool bio_check_ro(struct bio *bio) in bio_check_ro() argument
700 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) { in bio_check_ro()
703 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) in bio_check_ro()
708 bio_devname(bio, b), bio->bi_bdev->bd_partno); in bio_check_ro()
716 static noinline int should_fail_bio(struct bio *bio) in should_fail_bio() argument
718 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size)) in should_fail_bio()
729 static inline int bio_check_eod(struct bio *bio) in bio_check_eod() argument
731 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); in bio_check_eod()
732 unsigned int nr_sectors = bio_sectors(bio); in bio_check_eod()
736 bio->bi_iter.bi_sector > maxsector - nr_sectors)) { in bio_check_eod()
737 handle_bad_sector(bio, maxsector); in bio_check_eod()
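
handle_bad_sector(), bio_check_ro() and bio_check_eod() above are per-bio sanity checks: writes to a read-only bdev are rejected (an empty flush is still allowed through), and a bio that reaches past the end of the device is failed after a ratelimited message naming the device, opf and end sector. The end-of-device test is ordered so the sector arithmetic cannot underflow; an isolated sketch of the same check, with a hypothetical helper name:

#include <linux/blkdev.h>

/* True if [sector, sector + nr_sectors) lies inside @bdev. */
static bool bio_range_ok(struct block_device *bdev, sector_t sector,
                         unsigned int nr_sectors)
{
        sector_t maxsector = bdev_nr_sectors(bdev);

        if (!nr_sectors || !maxsector)
                return true;    /* nothing to check yet */

        /* Test nr_sectors first so the subtraction cannot underflow. */
        return nr_sectors <= maxsector &&
               sector <= maxsector - nr_sectors;
}
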
746 static int blk_partition_remap(struct bio *bio) in blk_partition_remap() argument
748 struct block_device *p = bio->bi_bdev; in blk_partition_remap()
750 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size))) in blk_partition_remap()
752 if (bio_sectors(bio)) { in blk_partition_remap()
753 bio->bi_iter.bi_sector += p->bd_start_sect; in blk_partition_remap()
754 trace_block_bio_remap(bio, p->bd_dev, in blk_partition_remap()
755 bio->bi_iter.bi_sector - in blk_partition_remap()
758 bio_set_flag(bio, BIO_REMAPPED); in blk_partition_remap()
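
blk_partition_remap() turns a partition-relative bio into an absolute one by adding the partition's bd_start_sect, emits the block_bio_remap trace event with the old sector, and sets BIO_REMAPPED so the shift happens only once. A reduced sketch of the same idea (tracing and fault injection omitted, helper name illustrative):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Shift a partition-relative bio to whole-disk sectors, exactly once. */
static void remap_bio_to_disk(struct bio *bio)
{
        struct block_device *part = bio->bi_bdev;

        if (bio_flagged(bio, BIO_REMAPPED))
                return;
        if (bio_sectors(bio))
                bio->bi_iter.bi_sector += part->bd_start_sect;
        bio_set_flag(bio, BIO_REMAPPED);
}
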
766 struct bio *bio) in blk_check_zone_append() argument
768 sector_t pos = bio->bi_iter.bi_sector; in blk_check_zone_append()
769 int nr_sectors = bio_sectors(bio); in blk_check_zone_append()
792 bio->bi_opf |= REQ_NOMERGE; in blk_check_zone_append()
797 static noinline_for_stack bool submit_bio_checks(struct bio *bio) in submit_bio_checks() argument
799 struct block_device *bdev = bio->bi_bdev; in submit_bio_checks()
806 plug = blk_mq_plug(q, bio); in submit_bio_checks()
808 bio->bi_opf |= REQ_NOWAIT; in submit_bio_checks()
814 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q)) in submit_bio_checks()
817 if (should_fail_bio(bio)) in submit_bio_checks()
819 if (unlikely(bio_check_ro(bio))) in submit_bio_checks()
821 if (!bio_flagged(bio, BIO_REMAPPED)) { in submit_bio_checks()
822 if (unlikely(bio_check_eod(bio))) in submit_bio_checks()
824 if (bdev->bd_partno && unlikely(blk_partition_remap(bio))) in submit_bio_checks()
832 if (op_is_flush(bio->bi_opf) && in submit_bio_checks()
834 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); in submit_bio_checks()
835 if (!bio_sectors(bio)) { in submit_bio_checks()
842 bio_clear_hipri(bio); in submit_bio_checks()
844 switch (bio_op(bio)) { in submit_bio_checks()
858 status = blk_check_zone_append(q, bio); in submit_bio_checks()
890 if (blk_throtl_bio(bio)) { in submit_bio_checks()
891 blkcg_bio_issue_init(bio); in submit_bio_checks()
895 blk_cgroup_bio_start(bio); in submit_bio_checks()
896 blkcg_bio_issue_init(bio); in submit_bio_checks()
898 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { in submit_bio_checks()
899 trace_block_bio_queue(bio); in submit_bio_checks()
903 bio_set_flag(bio, BIO_TRACE_COMPLETION); in submit_bio_checks()
910 bio->bi_status = status; in submit_bio_checks()
911 bio_endio(bio); in submit_bio_checks()
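
submit_bio_checks() is the validation funnel every bio passes through: REQ_NOWAIT bios are rejected on queues that do not support them, fault injection plus the read-only, end-of-device and partition-remap checks run, flush flags are stripped on queues without a volatile write cache (an empty flush is then completed immediately), each special operation (discard, secure erase, write-same, zone append, zone management) fails with BLK_STS_NOTSUPP unless the queue advertises it, and finally throttling, cgroup initialisation and the block_bio_queue trace point are applied. The practical upshot for callers is to test queue capabilities before building special bios; a hedged discard example with an illustrative helper (blkdev_issue_discard() repeats the same check internally on this kernel):

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Discard a range only when the queue advertises discard support. */
static int maybe_discard(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects)
{
        if (!blk_queue_discard(bdev_get_queue(bdev)))
                return -EOPNOTSUPP;

        return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
}
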
915 static blk_qc_t __submit_bio(struct bio *bio) in __submit_bio() argument
917 struct gendisk *disk = bio->bi_bdev->bd_disk; in __submit_bio()
920 if (unlikely(bio_queue_enter(bio) != 0)) in __submit_bio()
923 if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio)) in __submit_bio()
926 ret = disk->fops->submit_bio(bio); in __submit_bio()
929 return blk_mq_submit_bio(bio); in __submit_bio()
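
__submit_bio() enters the queue, runs the checks and blk-crypto preparation, then dispatches either to the driver's ->submit_bio() method (bio-based drivers such as device-mapper, brd or zram) or to blk_mq_submit_bio() for request-based queues. On this kernel the method still returns blk_qc_t; a minimal, hypothetical bio-based hook would look roughly like this:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/module.h>

/* Hypothetical bio-based driver that completes everything immediately. */
static blk_qc_t mydrv_submit_bio(struct bio *bio)
{
        /* A real driver would service the data described by the bio here. */
        bio_endio(bio);
        return BLK_QC_T_NONE;
}

static const struct block_device_operations mydrv_fops = {
        .owner          = THIS_MODULE,
        .submit_bio     = mydrv_submit_bio,
};
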
955 static blk_qc_t __submit_bio_noacct(struct bio *bio) in __submit_bio_noacct() argument
960 BUG_ON(bio->bi_next); in __submit_bio_noacct()
966 struct request_queue *q = bio->bi_bdev->bd_disk->queue; in __submit_bio_noacct()
975 ret = __submit_bio(bio); in __submit_bio_noacct()
983 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) in __submit_bio_noacct()
984 if (q == bio->bi_bdev->bd_disk->queue) in __submit_bio_noacct()
985 bio_list_add(&same, bio); in __submit_bio_noacct()
987 bio_list_add(&lower, bio); in __submit_bio_noacct()
995 } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); in __submit_bio_noacct()
1001 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) in __submit_bio_noacct_mq() argument
1009 ret = __submit_bio(bio); in __submit_bio_noacct_mq()
1010 } while ((bio = bio_list_pop(&bio_list[0]))); in __submit_bio_noacct_mq()
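
__submit_bio_noacct() and its blk-mq-only twin exist to flatten recursion in stacked devices: while current->bio_list is set, bios generated by lower layers are queued rather than submitted, then drained iteratively from bio_lists on the stack, with bios for lower-level devices drained before those aimed at the same queue. The bio_list helpers used there are ordinary singly linked list primitives; a small usage sketch (illustrative helper, e.g. a driver flushing bios it had deferred to a workqueue):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Move a bi_next chain onto a bio_list and submit it in FIFO order. */
static void drain_deferred_bios(struct bio *pending)
{
        struct bio_list list;
        struct bio *bio;

        bio_list_init(&list);
        while (pending) {
                struct bio *next = pending->bi_next;

                pending->bi_next = NULL;
                bio_list_add(&list, pending);
                pending = next;
        }
        while ((bio = bio_list_pop(&list)) != NULL)
                submit_bio_noacct(bio);
}
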
1025 blk_qc_t submit_bio_noacct(struct bio *bio) in submit_bio_noacct() argument
1034 bio_list_add(&current->bio_list[0], bio); in submit_bio_noacct()
1038 if (!bio->bi_bdev->bd_disk->fops->submit_bio) in submit_bio_noacct()
1039 return __submit_bio_noacct_mq(bio); in submit_bio_noacct()
1040 return __submit_bio_noacct(bio); in submit_bio_noacct()
1057 blk_qc_t submit_bio(struct bio *bio) in submit_bio() argument
1059 if (blkcg_punt_bio_submit(bio)) in submit_bio()
1066 if (bio_has_data(bio)) { in submit_bio()
1069 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) in submit_bio()
1071 bio->bi_bdev->bd_disk->queue) >> 9; in submit_bio()
1073 count = bio_sectors(bio); in submit_bio()
1075 if (op_is_write(bio_op(bio))) { in submit_bio()
1078 task_io_account_read(bio->bi_iter.bi_size); in submit_bio()
1089 if (unlikely(bio_op(bio) == REQ_OP_READ && in submit_bio()
1090 bio_flagged(bio, BIO_WORKINGSET))) { in submit_bio()
1095 ret = submit_bio_noacct(bio); in submit_bio()
1101 return submit_bio_noacct(bio); in submit_bio()
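
submit_bio() is the public entry point: it layers per-task and vm I/O accounting at submission time on top of submit_bio_noacct(), and works around PSI accounting for REQ_OP_READ bios flagged BIO_WORKINGSET. For a caller on this kernel the usual pattern is still: allocate a bio, point it at a device and sector, attach pages, submit. A hedged synchronous-read sketch (names and error handling simplified; bio_alloc() uses the pre-5.18 two-argument form that matches this source):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Synchronously read one page at @sector from @bdev. */
static int read_one_page(struct block_device *bdev, sector_t sector,
                         struct page *page)
{
        struct bio *bio;
        int ret;

        bio = bio_alloc(GFP_KERNEL, 1);         /* room for one bio_vec */
        if (!bio)
                return -ENOMEM;

        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_READ;
        bio->bi_iter.bi_sector = sector;
        if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
                bio_put(bio);
                return -EIO;
        }

        ret = submit_bio_wait(bio);             /* submits and waits */
        bio_put(bio);
        return ret;
}
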
1209 struct bio *bio; in blk_rq_err_bytes() local
1221 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_err_bytes()
1222 if ((bio->bi_opf & ff) != ff) in blk_rq_err_bytes()
1224 bytes += bio->bi_iter.bi_size; in blk_rq_err_bytes()
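
blk_rq_err_bytes() walks the request's bio chain through bi_next, summing the leading bios that carry all of the request's failfast flags to work out how many bytes may be failed fast. The same chain walk is what the __rq_for_each_bio() helper spells out; a tiny illustrative sketch:

#include <linux/blkdev.h>

/* Sum bi_size over every bio still attached to @rq (illustration only). */
static unsigned int rq_total_bio_bytes(struct request *rq)
{
        struct bio *bio;
        unsigned int bytes = 0;

        __rq_for_each_bio(bio, rq)
                bytes += bio->bi_iter.bi_size;
        return bytes;
}
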
1285 if (rq->bio && rq->bio->bi_bdev) in blk_account_io_start()
1286 rq->part = rq->bio->bi_bdev; in blk_account_io_start()
1317 unsigned long bio_start_io_acct(struct bio *bio) in bio_start_io_acct() argument
1319 return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio)); in bio_start_io_acct()
1344 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, in bio_end_io_acct_remapped() argument
1347 __part_end_io_acct(orig_bdev, bio_op(bio), start_time); in bio_end_io_acct_remapped()
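
blk_account_io_start() picks the accounting partition for request-based I/O (the first bio's bdev when present), while bio_start_io_acct() and bio_end_io_acct_remapped() are the hooks bio-based drivers call around each bio; the remapped variant takes the original block_device for bios the driver has already redirected. A rough pairing as a bio-based driver might use it, with hypothetical names (bio_end_io_acct() is the usual shorthand when the bio was not remapped):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical per-bio driver context carrying the accounting start time. */
struct mydrv_io {
        unsigned long start_time;
};

static void mydrv_io_start(struct bio *bio, struct mydrv_io *io)
{
        io->start_time = bio_start_io_acct(bio);
}

static void mydrv_io_done(struct bio *bio, struct mydrv_io *io)
{
        bio_end_io_acct(bio, io->start_time);
        bio_endio(bio);
}
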
1364 if (rq->bio) { in blk_steal_bios()
1366 list->tail->bi_next = rq->bio; in blk_steal_bios()
1368 list->head = rq->bio; in blk_steal_bios()
1371 rq->bio = NULL; in blk_steal_bios()
1408 if (!req->bio) in blk_update_request()
1424 while (req->bio) { in blk_update_request()
1425 struct bio *bio = req->bio; in blk_update_request() local
1426 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); in blk_update_request()
1428 if (bio_bytes == bio->bi_iter.bi_size) in blk_update_request()
1429 req->bio = bio->bi_next; in blk_update_request()
1432 bio_clear_flag(bio, BIO_TRACE_COMPLETION); in blk_update_request()
1433 req_bio_endio(req, bio, bio_bytes, error); in blk_update_request()
1445 if (!req->bio) { in blk_update_request()
1464 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; in blk_update_request()
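
blk_update_request() is the partial-completion engine: it consumes nr_bytes across req->bio, ending each fully finished bio through req_bio_endio(), recomputes the request's failfast flags from the first remaining bio, and returns true while the request still has bytes pending. A driver completing a request piecemeal typically uses it roughly like this (sketch only; a real driver would also requeue or restart the remainder):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Complete @done bytes of @req; finish the request once nothing is left. */
static void mydrv_complete_bytes(struct request *req, unsigned int done)
{
        if (!blk_update_request(req, BLK_STS_OK, done))
                __blk_mq_end_request(req, BLK_STS_OK);
}
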
1541 struct bio *bio; in blk_rq_unprep_clone() local
1543 while ((bio = rq->bio) != NULL) { in blk_rq_unprep_clone()
1544 rq->bio = bio->bi_next; in blk_rq_unprep_clone()
1546 bio_put(bio); in blk_rq_unprep_clone()
1570 int (*bio_ctr)(struct bio *, struct bio *, void *), in blk_rq_prep_clone() argument
1573 struct bio *bio, *bio_src; in blk_rq_prep_clone() local
1579 bio = bio_clone_fast(bio_src, gfp_mask, bs); in blk_rq_prep_clone()
1580 if (!bio) in blk_rq_prep_clone()
1583 if (bio_ctr && bio_ctr(bio, bio_src, data)) in blk_rq_prep_clone()
1586 if (rq->bio) { in blk_rq_prep_clone()
1587 rq->biotail->bi_next = bio; in blk_rq_prep_clone()
1588 rq->biotail = bio; in blk_rq_prep_clone()
1590 rq->bio = rq->biotail = bio; in blk_rq_prep_clone()
1592 bio = NULL; in blk_rq_prep_clone()
1605 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) in blk_rq_prep_clone()
1611 if (bio) in blk_rq_prep_clone()
1612 bio_put(bio); in blk_rq_prep_clone()
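
blk_rq_prep_clone() duplicates every bio of the source request with bio_clone_fast() from the caller's bio_set and chains the clones onto the destination request; the optional bio_ctr callback is handed each (clone, source) pair, and blk_rq_unprep_clone() later releases the clones with bio_put(). Request-based device-mapper is the main user. A hedged sketch of a trivial bio_ctr callback matching the signature shown above (names illustrative):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical bio_ctr: carry a flag from the source bio to its clone. */
static int mydrv_bio_ctr(struct bio *clone, struct bio *bio_src, void *data)
{
        if (bio_flagged(bio_src, BIO_QUIET))
                bio_set_flag(clone, BIO_QUIET);
        return 0;       /* returning non-zero aborts blk_rq_prep_clone() */
}
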
1767 sizeof_field(struct bio, bi_opf)); in blk_dev_init()