Lines Matching refs:bio

318 int __bio_queue_enter(struct request_queue *q, struct bio *bio)  in __bio_queue_enter()  argument
321 struct gendisk *disk = bio->bi_bdev->bd_disk; in __bio_queue_enter()
323 if (bio->bi_opf & REQ_NOWAIT) { in __bio_queue_enter()
326 bio_wouldblock_error(bio); in __bio_queue_enter()
348 bio_io_error(bio); in __bio_queue_enter()
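
The group above, from __bio_queue_enter(), shows the REQ_NOWAIT handling on queue entry: a bio marked REQ_NOWAIT is never allowed to sleep waiting for the queue, so it is failed immediately via bio_wouldblock_error() (line 326), while a bio submitted against a disk that has gone away is failed with bio_io_error() (line 348). A minimal user-space sketch of that try-or-fail-fast shape; struct queue, try_enter_queue() and queue_enter() are illustrative stand-ins, not kernel API:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct queue { bool dying; int users; };

    /* Illustrative stand-in; not the kernel's blk_try_enter_queue(). */
    static bool try_enter_queue(struct queue *q)
    {
        if (q->dying)
            return false;
        q->users++;
        return true;
    }

    static int queue_enter(struct queue *q, bool nowait)
    {
        if (try_enter_queue(q))
            return 0;
        if (nowait)                 /* REQ_NOWAIT: never sleep, fail with -EAGAIN */
            return -EAGAIN;
        /* a real implementation would sleep here until the queue is usable,
         * and only fail with an I/O error if the device is being torn down */
        return -ENODEV;
    }

    int main(void)
    {
        struct queue q = { .dying = true };
        printf("%d\n", queue_enter(&q, true));  /* prints -11 (-EAGAIN on Linux) */
        return 0;
    }
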
488 static inline void bio_check_ro(struct bio *bio) in bio_check_ro() argument
490 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) { in bio_check_ro()
491 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) in bio_check_ro()
494 bio->bi_bdev); in bio_check_ro()
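
bio_check_ro() only complains about writes aimed at a read-only block device, and line 491 carves out an exception for an empty flush (op_is_flush() with zero data sectors), which does not modify the media and is therefore let through silently. A standalone sketch of that predicate with simplified stand-in types (fake_bio and write_to_ro() are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum op { OP_READ, OP_WRITE, OP_FLUSH };

    struct fake_bio { enum op op; unsigned sectors; };

    /* Mirrors the shape of the check: writes to a read-only device are flagged,
     * except for a pure flush that carries no data. (Simplified illustration.) */
    static bool write_to_ro(const struct fake_bio *b, bool bdev_ro)
    {
        bool is_write = (b->op != OP_READ);

        if (!is_write || !bdev_ro)
            return false;
        if (b->op == OP_FLUSH && b->sectors == 0)
            return false;           /* empty flush is allowed */
        return true;                /* the kernel prints a warning here */
    }

    int main(void)
    {
        struct fake_bio flush = { OP_FLUSH, 0 }, write = { OP_WRITE, 8 };
        printf("%d %d\n", write_to_ro(&flush, true), write_to_ro(&write, true)); /* 0 1 */
        return 0;
    }
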
499 static noinline int should_fail_bio(struct bio *bio) in should_fail_bio() argument
501 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size)) in should_fail_bio()
512 static inline int bio_check_eod(struct bio *bio) in bio_check_eod() argument
514 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); in bio_check_eod()
515 unsigned int nr_sectors = bio_sectors(bio); in bio_check_eod()
519 bio->bi_iter.bi_sector > maxsector - nr_sectors)) { in bio_check_eod()
522 current->comm, bio->bi_bdev, bio->bi_opf, in bio_check_eod()
523 bio->bi_iter.bi_sector, nr_sectors, maxsector); in bio_check_eod()
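
The end-of-device test in bio_check_eod() (lines 514-523) is written to avoid integer overflow: instead of computing bi_sector + nr_sectors, which could wrap for a corrupted bio, it first rejects nr_sectors > maxsector and only then compares bi_sector against maxsector - nr_sectors. A self-contained sketch of the same arithmetic:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Overflow-safe "does [sector, sector + nr) fit inside the device?" check,
     * mirroring the structure of bio_check_eod(). */
    static bool beyond_eod(sector_t sector, unsigned nr_sectors, sector_t maxsector)
    {
        if (!nr_sectors)
            return false;                       /* zero-length bios are never EOD */
        return nr_sectors > maxsector ||
               sector > maxsector - nr_sectors; /* no sector + nr overflow possible */
    }

    int main(void)
    {
        sector_t max = 1000;
        printf("%d\n", beyond_eod(996, 8, max));        /* 1: runs past the end */
        printf("%d\n", beyond_eod(UINT64_MAX, 8, max)); /* 1: would wrap if added */
        printf("%d\n", beyond_eod(992, 8, max));        /* 0: exactly fits */
        return 0;
    }
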
532 static int blk_partition_remap(struct bio *bio) in blk_partition_remap() argument
534 struct block_device *p = bio->bi_bdev; in blk_partition_remap()
536 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size))) in blk_partition_remap()
538 if (bio_sectors(bio)) { in blk_partition_remap()
539 bio->bi_iter.bi_sector += p->bd_start_sect; in blk_partition_remap()
540 trace_block_bio_remap(bio, p->bd_dev, in blk_partition_remap()
541 bio->bi_iter.bi_sector - in blk_partition_remap()
544 bio_set_flag(bio, BIO_REMAPPED); in blk_partition_remap()
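
blk_partition_remap() converts a partition-relative sector into an absolute sector on the whole disk by adding the partition start (bd_start_sect, line 539); the trace event on lines 540-541 still reports the original partition-relative offset, hence the subtraction, and BIO_REMAPPED (line 544) prevents the bio from being remapped a second time. The arithmetic as a standalone sketch (types and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    struct part { sector_t start_sect; };
    struct fbio { sector_t sector; unsigned sectors; int remapped; };

    /* Shift a partition-relative bio onto whole-disk coordinates (simplified). */
    static void partition_remap(struct fbio *bio, const struct part *p)
    {
        if (bio->sectors) {
            bio->sector += p->start_sect;
            /* the kernel traces the pre-remap offset: bio->sector - p->start_sect */
        }
        bio->remapped = 1;      /* BIO_REMAPPED: don't remap again on re-submission */
    }

    int main(void)
    {
        struct part p = { .start_sect = 2048 };
        struct fbio b = { .sector = 100, .sectors = 8 };

        partition_remap(&b, &p);
        printf("%llu\n", (unsigned long long)b.sector);     /* 2148 */
        return 0;
    }
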
552 struct bio *bio) in blk_check_zone_append() argument
554 int nr_sectors = bio_sectors(bio); in blk_check_zone_append()
557 if (!bdev_is_zoned(bio->bi_bdev)) in blk_check_zone_append()
561 if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) || in blk_check_zone_append()
562 !bio_zone_is_seq(bio)) in blk_check_zone_append()
577 bio->bi_opf |= REQ_NOMERGE; in blk_check_zone_append()
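
For a zone-append bio, line 561 tests alignment with a bitmask, bi_sector & (zone_sectors - 1), which is only valid because zoned devices expose power-of-two zone sizes; the bio must start at the boundary of a sequential-write-required zone, and REQ_NOMERGE (line 577) is set because the device chooses the final write location, so merging would break the zone-append contract. A sketch of the alignment test:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Zone-boundary alignment check; valid only for power-of-two zone sizes,
     * which is what the mask trick in blk_check_zone_append() relies on. */
    static bool zone_aligned(sector_t sector, sector_t zone_sectors)
    {
        return (sector & (zone_sectors - 1)) == 0;
    }

    int main(void)
    {
        sector_t zone = 1 << 16;                            /* 64Ki sectors per zone */
        printf("%d %d\n", zone_aligned(3 << 16, zone),      /* 1: on a boundary */
                          zone_aligned((3 << 16) + 8, zone)); /* 0: mid-zone */
        return 0;
    }
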
582 static void __submit_bio(struct bio *bio) in __submit_bio() argument
584 struct gendisk *disk = bio->bi_bdev->bd_disk; in __submit_bio()
586 if (unlikely(!blk_crypto_bio_prep(&bio))) in __submit_bio()
590 blk_mq_submit_bio(bio); in __submit_bio()
591 } else if (likely(bio_queue_enter(bio) == 0)) { in __submit_bio()
592 disk->fops->submit_bio(bio); in __submit_bio()
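
__submit_bio() is the branch point between the two driver models: a blk-mq disk (one without a ->submit_bio method) goes straight to blk_mq_submit_bio() (line 590), while a bio-based driver first takes a queue reference with bio_queue_enter() and then calls its own ->submit_bio (lines 591-592). A small function-pointer sketch of that dispatch; struct fake_disk and both callbacks are illustrative:

    #include <stdio.h>

    struct fbio { int id; };

    struct fake_disk {
        /* NULL for a blk-mq disk, non-NULL for a bio-based driver */
        void (*submit_bio)(struct fbio *);
    };

    static void mq_submit(struct fbio *b)   { printf("mq path: bio %d\n", b->id); }
    static void drv_submit(struct fbio *b)  { printf("driver path: bio %d\n", b->id); }

    static void dispatch(struct fake_disk *disk, struct fbio *bio)
    {
        if (!disk->submit_bio)
            mq_submit(bio);             /* like blk_mq_submit_bio() */
        else
            disk->submit_bio(bio);      /* like disk->fops->submit_bio(bio) */
    }

    int main(void)
    {
        struct fake_disk mq = { .submit_bio = NULL }, biodrv = { .submit_bio = drv_submit };
        struct fbio a = { 1 }, b = { 2 };

        dispatch(&mq, &a);
        dispatch(&biodrv, &b);
        return 0;
    }
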
616 static void __submit_bio_noacct(struct bio *bio) in __submit_bio_noacct() argument
620 BUG_ON(bio->bi_next); in __submit_bio_noacct()
626 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in __submit_bio_noacct()
635 __submit_bio(bio); in __submit_bio_noacct()
643 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) in __submit_bio_noacct()
644 if (q == bdev_get_queue(bio->bi_bdev)) in __submit_bio_noacct()
645 bio_list_add(&same, bio); in __submit_bio_noacct()
647 bio_list_add(&lower, bio); in __submit_bio_noacct()
655 } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); in __submit_bio_noacct()
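
The loop in __submit_bio_noacct() (lines 616-655) flattens what would otherwise be unbounded recursion when stacked drivers resubmit bios from inside ->submit_bio: bios submitted during an ongoing submission are parked on current->bio_list, and the outer do/while drains them iteratively, sorting newly generated bios so that those for lower-level devices are handled before those for the same queue (lines 643-647). A compact user-space sketch of that recursion-flattened-into-a-work-list pattern; the list type and submit_one() are illustrative only:

    #include <stdio.h>

    struct node { int id; struct node *next; };
    struct list { struct node *head, **tail; };

    static void list_init(struct list *l) { l->head = NULL; l->tail = &l->head; }
    static void list_add(struct list *l, struct node *n)
    {
        n->next = NULL; *l->tail = n; l->tail = &n->next;
    }
    static struct node *list_pop(struct list *l)
    {
        struct node *n = l->head;
        if (n) { l->head = n->next; if (!l->head) l->tail = &l->head; }
        return n;
    }

    static struct list *pending;   /* stand-in for current->bio_list */

    /* A "driver" that would recurse: instead it parks the child work item. */
    static void submit_one(struct node *n)
    {
        printf("handling %d\n", n->id);
        if (n->id < 3) {
            static struct node child[3];
            child[n->id].id = n->id + 1;
            list_add(pending, &child[n->id]);   /* caught instead of recursing */
        }
    }

    int main(void)
    {
        struct list work;
        struct node first = { .id = 0 };

        list_init(&work);
        pending = &work;
        submit_one(&first);
        /* drain iteratively, as __submit_bio_noacct() drains bio_list_on_stack */
        for (struct node *n; (n = list_pop(&work)); )
            submit_one(n);
        return 0;
    }
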
660 static void __submit_bio_noacct_mq(struct bio *bio) in __submit_bio_noacct_mq() argument
667 __submit_bio(bio); in __submit_bio_noacct_mq()
668 } while ((bio = bio_list_pop(&bio_list[0]))); in __submit_bio_noacct_mq()
673 void submit_bio_noacct_nocheck(struct bio *bio) in submit_bio_noacct_nocheck() argument
682 bio_list_add(&current->bio_list[0], bio); in submit_bio_noacct_nocheck()
683 else if (!bio->bi_bdev->bd_disk->fops->submit_bio) in submit_bio_noacct_nocheck()
684 __submit_bio_noacct_mq(bio); in submit_bio_noacct_nocheck()
686 __submit_bio_noacct(bio); in submit_bio_noacct_nocheck()
698 void submit_bio_noacct(struct bio *bio) in submit_bio_noacct() argument
700 struct block_device *bdev = bio->bi_bdev; in submit_bio_noacct()
707 plug = blk_mq_plug(bio); in submit_bio_noacct()
709 bio->bi_opf |= REQ_NOWAIT; in submit_bio_noacct()
715 if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev)) in submit_bio_noacct()
718 if (should_fail_bio(bio)) in submit_bio_noacct()
720 bio_check_ro(bio); in submit_bio_noacct()
721 if (!bio_flagged(bio, BIO_REMAPPED)) { in submit_bio_noacct()
722 if (unlikely(bio_check_eod(bio))) in submit_bio_noacct()
724 if (bdev->bd_partno && unlikely(blk_partition_remap(bio))) in submit_bio_noacct()
732 if (op_is_flush(bio->bi_opf) && in submit_bio_noacct()
734 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); in submit_bio_noacct()
735 if (!bio_sectors(bio)) { in submit_bio_noacct()
742 bio_clear_polled(bio); in submit_bio_noacct()
744 switch (bio_op(bio)) { in submit_bio_noacct()
754 status = blk_check_zone_append(q, bio); in submit_bio_noacct()
762 if (!bdev_is_zoned(bio->bi_bdev)) in submit_bio_noacct()
766 if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q)) in submit_bio_noacct()
777 if (blk_throtl_bio(bio)) in submit_bio_noacct()
780 blk_cgroup_bio_start(bio); in submit_bio_noacct()
781 blkcg_bio_issue_init(bio); in submit_bio_noacct()
783 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { in submit_bio_noacct()
784 trace_block_bio_queue(bio); in submit_bio_noacct()
788 bio_set_flag(bio, BIO_TRACE_COMPLETION); in submit_bio_noacct()
790 submit_bio_noacct_nocheck(bio); in submit_bio_noacct()
796 bio->bi_status = status; in submit_bio_noacct()
797 bio_endio(bio); in submit_bio_noacct()
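
Among the many checks in submit_bio_noacct(), lines 732-735 strip REQ_PREFLUSH and REQ_FUA when the queue has no volatile write-back cache (there is nothing to flush), and a flush bio that carries no data is then completed immediately instead of being issued. A sketch of that flag handling with illustrative flag values:

    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_PREFLUSH (1u << 0)   /* illustrative values, not the kernel's */
    #define REQ_FUA      (1u << 1)

    /* Returns true when the (possibly modified) bio still needs to be issued. */
    static bool prep_flush(unsigned *opf, unsigned nr_sectors, bool has_wb_cache)
    {
        if ((*opf & (REQ_PREFLUSH | REQ_FUA)) && !has_wb_cache) {
            *opf &= ~(REQ_PREFLUSH | REQ_FUA);  /* nothing to flush on this device */
            if (!nr_sectors)
                return false;                   /* empty flush: complete right away */
        }
        return true;
    }

    int main(void)
    {
        unsigned opf = REQ_PREFLUSH;
        printf("%d\n", prep_flush(&opf, 0, false));     /* 0: done without any I/O */
        return 0;
    }
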
814 void submit_bio(struct bio *bio) in submit_bio() argument
816 if (blkcg_punt_bio_submit(bio)) in submit_bio()
819 if (bio_op(bio) == REQ_OP_READ) { in submit_bio()
820 task_io_account_read(bio->bi_iter.bi_size); in submit_bio()
821 count_vm_events(PGPGIN, bio_sectors(bio)); in submit_bio()
822 } else if (bio_op(bio) == REQ_OP_WRITE) { in submit_bio()
823 count_vm_events(PGPGOUT, bio_sectors(bio)); in submit_bio()
826 submit_bio_noacct(bio); in submit_bio()
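
submit_bio() wraps submit_bio_noacct() with per-task and VM-event accounting: reads are charged to the task with task_io_account_read() (in bytes, bi_size) and counted in the PGPGIN VM event, writes in PGPGOUT, with the VM events measured in 512-byte sectors (bio_sectors()). A tiny sketch of the sector-based split with stand-in counters:

    #include <stdio.h>

    enum { OP_READ, OP_WRITE };

    static unsigned long pgpgin, pgpgout;   /* stand-ins for the VM event counters */

    /* Account a request the way submit_bio() does, in 512-byte sectors. */
    static void account(int op, unsigned bytes)
    {
        unsigned sectors = bytes >> 9;

        if (op == OP_READ)
            pgpgin += sectors;
        else if (op == OP_WRITE)
            pgpgout += sectors;
    }

    int main(void)
    {
        account(OP_READ, 4096);
        account(OP_WRITE, 8192);
        printf("in=%lu out=%lu\n", pgpgin, pgpgout);    /* in=8 out=16 */
        return 0;
    }
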
842 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) in bio_poll() argument
844 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_poll()
845 blk_qc_t cookie = READ_ONCE(bio->bi_cookie); in bio_poll()
860 if (bio_queue_enter(bio)) in bio_poll()
868 ret = disk->fops->poll_bio(bio, iob, flags); in bio_poll()
882 struct bio *bio; in iocb_bio_iopoll() local
906 bio = READ_ONCE(kiocb->private); in iocb_bio_iopoll()
907 if (bio && bio->bi_bdev) in iocb_bio_iopoll()
908 ret = bio_poll(bio, iob, flags); in iocb_bio_iopoll()
952 void bio_start_io_acct_time(struct bio *bio, unsigned long start_time) in bio_start_io_acct_time() argument
954 bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio), in bio_start_io_acct_time()
955 bio_op(bio), start_time); in bio_start_io_acct_time()
965 unsigned long bio_start_io_acct(struct bio *bio) in bio_start_io_acct() argument
967 return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio), in bio_start_io_acct()
968 bio_op(bio), jiffies); in bio_start_io_acct()
987 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, in bio_end_io_acct_remapped() argument
990 bdev_end_io_acct(orig_bdev, bio_op(bio), start_time); in bio_end_io_acct_remapped()
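
The accounting helpers above bracket a bio's lifetime for disk statistics: a driver records a start time when it accepts the bio (bio_start_io_acct() uses jiffies, bio_start_io_acct_time() takes an explicit timestamp) and reports the elapsed time at completion; the _remapped variant exists for stacked drivers that change bio->bi_bdev in flight and must charge the original device. A minimal sketch of that bracketing pattern in plain C, using a monotonic clock as a stand-in for jiffies:

    #include <stdio.h>
    #include <time.h>

    /* Stand-in for jiffies-based start/end accounting around one I/O. */
    static long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    int main(void)
    {
        long start = now_ms();          /* like bio_start_io_acct() */
        /* ... the driver would issue and complete the I/O here ... */
        long dur = now_ms() - start;    /* like bio_end_io_acct(bio, start) */
        printf("io took %ld ms\n", dur);
        return 0;
    }
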
1185 sizeof_field(struct bio, bi_opf)); in blk_dev_init()
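
The final match, inside blk_dev_init(), is part of a compile-time assertion that the request operation bits fit within bio->bi_opf; sizeof_field() yields the size of a single struct member. The same kind of check in standalone C11, with an illustrative REQ_OP_BITS value and a fake bio type:

    #include <stdint.h>

    /* Size of one struct member, like the kernel's sizeof_field() macro. */
    #define sizeof_field(type, member) sizeof(((type *)0)->member)

    struct fake_bio { uint32_t bi_opf; };

    #define REQ_OP_BITS 8   /* illustrative value */

    /* Fails to compile if the op field ever becomes too small for the op bits. */
    _Static_assert(8 * sizeof_field(struct fake_bio, bi_opf) > REQ_OP_BITS,
                   "bi_opf too small to hold REQ_OP_BITS");

    int main(void) { return 0; }
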