Lines matching refs:bio in drivers/md/dm-raid1.c (the device-mapper RAID1/mirror target)

121 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)  in queue_bio()  argument
130 bio_list_add(bl, bio); in queue_bio()
140 struct bio *bio; in dispatch_bios() local
142 while ((bio = bio_list_pop(bio_list))) in dispatch_bios()
143 queue_bio(ms, bio, WRITE); in dispatch_bios()
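queue_bio() picks the mirror set's read or write list from the data direction, appends the bio, and wakes the worker; dispatch_bios() simply drains a bio_list and re-queues every bio as a write. Underneath both is the kernel's singly linked bio_list FIFO (bio_list_add() appends at the tail, bio_list_pop() removes from the head). Below is a minimal userspace sketch of that FIFO pattern; fake_bio and fake_bio_list are simplified stand-ins, not the kernel's struct bio / struct bio_list, and no locking is shown.

    #include <stdio.h>

    /* Simplified stand-ins for struct bio / struct bio_list. */
    struct fake_bio {
        unsigned long sector;
        struct fake_bio *next;          /* plays the role of bi_next */
    };

    struct fake_bio_list {
        struct fake_bio *head, *tail;
    };

    /* Append at the tail, as bio_list_add() does. */
    static void fifo_add(struct fake_bio_list *bl, struct fake_bio *b)
    {
        b->next = NULL;
        if (bl->tail)
            bl->tail->next = b;
        else
            bl->head = b;
        bl->tail = b;
    }

    /* Remove from the head, as bio_list_pop() does. */
    static struct fake_bio *fifo_pop(struct fake_bio_list *bl)
    {
        struct fake_bio *b = bl->head;

        if (b) {
            bl->head = b->next;
            if (!bl->head)
                bl->tail = NULL;
            b->next = NULL;
        }
        return b;
    }

    int main(void)
    {
        struct fake_bio_list bl = { NULL, NULL };
        struct fake_bio a = { 0, NULL }, b = { 8, NULL }, c = { 16, NULL };
        struct fake_bio *cur;

        fifo_add(&bl, &a);
        fifo_add(&bl, &b);
        fifo_add(&bl, &c);

        /* Drain in FIFO order, like dispatch_bios() draining its list. */
        while ((cur = fifo_pop(&bl)))
            printf("dispatch bio at sector %lu\n", cur->sector);
        return 0;
    }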
163 static struct mirror *bio_get_m(struct bio *bio) in bio_get_m() argument
165 return (struct mirror *) bio->bi_next; in bio_get_m()
168 static void bio_set_m(struct bio *bio, struct mirror *m) in bio_set_m() argument
170 bio->bi_next = (struct bio *) m; in bio_set_m()
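bio_set_m() and bio_get_m() borrow the bio's bi_next pointer to remember which mirror leg an asynchronous I/O was sent to: bi_next only matters while a bio sits on a list, and dm-raid1 owns the bio exclusively while the dm-io request is in flight, so the field is free to carry a struct mirror pointer until the completion callback retrieves and clears it. A hedged sketch of that "borrow an unused pointer field" pattern, using simplified stand-in types rather than the kernel's:

    #include <assert.h>
    #include <stdio.h>

    struct fake_mirror { int index; };

    struct fake_bio {
        struct fake_bio *bi_next;   /* unused while the bio is in flight here */
    };

    /* Stash the mirror in bi_next, as bio_set_m() does. */
    static void set_m(struct fake_bio *bio, struct fake_mirror *m)
    {
        bio->bi_next = (struct fake_bio *)m;
    }

    /* Recover it in the completion callback, as bio_get_m() does. */
    static struct fake_mirror *get_m(struct fake_bio *bio)
    {
        return (struct fake_mirror *)bio->bi_next;
    }

    int main(void)
    {
        struct fake_mirror m = { .index = 1 };
        struct fake_bio bio = { NULL };
        struct fake_mirror *got;

        set_m(&bio, &m);        /* before issuing the async read/write */
        got = get_m(&bio);      /* in read_callback()/write_callback() */
        set_m(&bio, NULL);      /* cleared before the bio is used again */

        assert(got == &m);
        printf("completed on mirror %d\n", got->index);
        return 0;
    }

The real callbacks follow the same get-then-clear order (bio_get_m() immediately followed by bio_set_m(bio, NULL)) so the borrowed pointer never leaks back into the block layer.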
436 static int mirror_available(struct mirror_set *ms, struct bio *bio) in mirror_available() argument
439 region_t region = dm_rh_bio_to_region(ms->rh, bio); in mirror_available()
442 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; in mirror_available()
450 static sector_t map_sector(struct mirror *m, struct bio *bio) in map_sector() argument
452 if (unlikely(!bio->bi_iter.bi_size)) in map_sector()
454 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector()
457 static void map_bio(struct mirror *m, struct bio *bio) in map_bio() argument
459 bio_set_dev(bio, m->dev->bdev); in map_bio()
460 bio->bi_iter.bi_sector = map_sector(m, bio); in map_bio()
464 struct bio *bio) in map_region() argument
467 io->sector = map_sector(m, bio); in map_region()
468 io->count = bio_sectors(bio); in map_region()
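map_sector() turns the sector a bio carries on the mapped device into a sector on one mirror leg: the leg's start offset plus the target-relative offset (dm_target_offset() subtracts the target's begin sector), with zero-size bios such as empty flushes mapped to sector 0. map_bio() applies that to the bio itself, while map_region() fills a dm_io_region with the same start and bio_sectors(bio) as the count. A small arithmetic sketch, assuming a simplified target/leg layout; the struct names are illustrative only:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct fake_target { sector_t begin; };   /* where the target starts on the mapped device */
    struct fake_mirror { sector_t offset; };  /* where this leg starts on its underlying device */

    /* dm_target_offset(ti, sector) boils down to (sector - ti->begin). */
    static sector_t target_offset(const struct fake_target *ti, sector_t sector)
    {
        return sector - ti->begin;
    }

    /* Model of map_sector(): leg offset + target-relative sector. */
    static sector_t map_sector_model(const struct fake_target *ti,
                                     const struct fake_mirror *m,
                                     sector_t bi_sector, sector_t bi_size)
    {
        if (!bi_size)
            return 0;           /* empty (flush) bios carry no data sector */
        return m->offset + target_offset(ti, bi_sector);
    }

    int main(void)
    {
        struct fake_target ti = { .begin = 1024 };
        struct fake_mirror leg0 = { .offset = 0 }, leg1 = { .offset = 2048 };
        sector_t bi_sector = 1536;      /* as seen on the mapped device */

        printf("leg0 -> %llu, leg1 -> %llu\n",
               map_sector_model(&ti, &leg0, bi_sector, 4096),
               map_sector_model(&ti, &leg1, bi_sector, 4096));
        return 0;
    }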
471 static void hold_bio(struct mirror_set *ms, struct bio *bio) in hold_bio() argument
486 bio->bi_status = BLK_STS_DM_REQUEUE; in hold_bio()
488 bio->bi_status = BLK_STS_IOERR; in hold_bio()
490 bio_endio(bio); in hold_bio()
497 bio_list_add(&ms->holds, bio); in hold_bio()
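hold_bio() is the suspend-time safety valve: a bio that arrives while the mirror set is already suspended is completed immediately, with BLK_STS_DM_REQUEUE under a noflush suspend (so it can be resubmitted after resume) or BLK_STS_IOERR otherwise, and any other bio is parked on ms->holds until the suspend finishes. A compact model of that decision, assuming the two conditions correspond to the kernel's suspend flag and dm_noflush_suspending() check; the names and the enum are stand-ins:

    #include <stdio.h>
    #include <stdbool.h>

    enum hold_outcome { ENDED_REQUEUE, ENDED_ERROR, HELD };

    static enum hold_outcome hold_bio_model(bool suspended, bool noflush_suspend)
    {
        if (suspended)
            return noflush_suspend ? ENDED_REQUEUE  /* bi_status = BLK_STS_DM_REQUEUE */
                                   : ENDED_ERROR;   /* bi_status = BLK_STS_IOERR */
        return HELD;                                /* bio_list_add(&ms->holds, bio) */
    }

    int main(void)
    {
        const char *names[] = { "requeued", "errored", "held" };

        printf("%s %s %s\n",
               names[hold_bio_model(true, true)],
               names[hold_bio_model(true, false)],
               names[hold_bio_model(false, false)]);
        return 0;
    }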
506 struct bio *bio = context; in read_callback() local
509 m = bio_get_m(bio); in read_callback()
510 bio_set_m(bio, NULL); in read_callback()
513 bio_endio(bio); in read_callback()
519 if (likely(default_ok(m)) || mirror_available(m->ms, bio)) { in read_callback()
523 queue_bio(m->ms, bio, bio_data_dir(bio)); in read_callback()
529 bio_io_error(bio); in read_callback()
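read_callback() recovers the mirror the read was issued to with bio_get_m(), clears the stash, and on success just ends the bio. On error it fails that leg and retries: if the default leg is still usable (default_ok()) or another in-sync leg exists (mirror_available()), the bio is re-queued so a different mirror can service it; only when nothing is left does it call bio_io_error(). A sketch of that retry policy; the boolean parameters stand in for the kernel's predicates:

    #include <stdio.h>
    #include <stdbool.h>

    enum read_outcome { READ_DONE, READ_RETRY_OTHER_LEG, READ_FAILED };

    /* Model of the decision taken when an async mirror read completes. */
    static enum read_outcome read_callback_model(bool error, bool default_leg_ok,
                                                 bool another_leg_in_sync)
    {
        if (!error)
            return READ_DONE;               /* bio_endio(bio) */
        if (default_leg_ok || another_leg_in_sync)
            return READ_RETRY_OTHER_LEG;    /* queue_bio(ms, bio, READ) */
        return READ_FAILED;                 /* bio_io_error(bio) */
    }

    int main(void)
    {
        const char *names[] = { "done", "retry", "failed" };

        printf("%s %s %s\n",
               names[read_callback_model(false, true,  false)],
               names[read_callback_model(true,  false, true)],
               names[read_callback_model(true,  false, false)]);
        return 0;
    }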
533 static void read_async_bio(struct mirror *m, struct bio *bio) in read_async_bio() argument
539 .mem.ptr.bio = bio, in read_async_bio()
541 .notify.context = bio, in read_async_bio()
545 map_region(&io, m, bio); in read_async_bio()
546 bio_set_m(bio, m); in read_async_bio()
560 struct bio *bio; in do_reads() local
563 while ((bio = bio_list_pop(reads))) { in do_reads()
564 region = dm_rh_bio_to_region(ms->rh, bio); in do_reads()
571 m = choose_mirror(ms, bio->bi_iter.bi_sector); in do_reads()
576 read_async_bio(m, bio); in do_reads()
578 bio_io_error(bio); in do_reads()
597 struct bio *bio = (struct bio *) context; in write_callback() local
602 ms = bio_get_m(bio)->ms; in write_callback()
603 bio_set_m(bio, NULL); in write_callback()
612 bio_endio(bio); in write_callback()
620 if (bio_op(bio) == REQ_OP_DISCARD) { in write_callback()
621 bio->bi_status = BLK_STS_NOTSUPP; in write_callback()
622 bio_endio(bio); in write_callback()
638 bio_list_add(&ms->failures, bio); in write_callback()
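write_callback() sees one completion for the whole mirrored write: on success it ends the bio, a failed discard is completed with BLK_STS_NOTSUPP, and any other failure is decoded from a per-leg error bitmask (bit i set means the write to mirror i failed) before the bio is parked on ms->failures for the worker thread to handle. A sketch of the bitmask decoding only; NR_LEGS and the array are illustrative, not kernel structures:

    #include <stdio.h>

    #define NR_LEGS 3

    /* Mark every leg whose bit is set in the dm-io error mask as failed. */
    static void decode_write_errors(unsigned long error_mask, int leg_failed[NR_LEGS])
    {
        for (int i = 0; i < NR_LEGS; i++)
            if (error_mask & (1UL << i))
                leg_failed[i] = 1;      /* kernel: fail_mirror(ms->mirror + i, ...) */
    }

    int main(void)
    {
        int leg_failed[NR_LEGS] = { 0 };

        decode_write_errors(0x5UL, leg_failed);     /* legs 0 and 2 errored */

        for (int i = 0; i < NR_LEGS; i++)
            printf("leg %d: %s\n", i, leg_failed[i] ? "failed" : "ok");
        /* The bio itself would now go onto ms->failures for do_failures(). */
        return 0;
    }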
644 static void do_write(struct mirror_set *ms, struct bio *bio) in do_write() argument
649 blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH); in do_write()
653 .mem.ptr.bio = bio, in do_write()
655 .notify.context = bio, in do_write()
659 if (bio_op(bio) == REQ_OP_DISCARD) { in do_write()
666 map_region(dest++, m, bio); in do_write()
672 bio_set_m(bio, get_default_mirror(ms)); in do_write()
680 struct bio *bio; in do_writes() local
697 while ((bio = bio_list_pop(writes))) { in do_writes()
698 if ((bio->bi_opf & REQ_PREFLUSH) || in do_writes()
699 (bio_op(bio) == REQ_OP_DISCARD)) { in do_writes()
700 bio_list_add(&sync, bio); in do_writes()
704 region = dm_rh_bio_to_region(ms->rh, bio); in do_writes()
708 bio_list_add(&requeue, bio); in do_writes()
728 bio_list_add(this_list, bio); in do_writes()
766 while ((bio = bio_list_pop(&sync))) in do_writes()
767 do_write(ms, bio); in do_writes()
769 while ((bio = bio_list_pop(&recover))) in do_writes()
770 dm_rh_delay(ms->rh, bio); in do_writes()
772 while ((bio = bio_list_pop(&nosync))) { in do_writes()
775 bio_list_add(&ms->failures, bio); in do_writes()
779 map_bio(get_default_mirror(ms), bio); in do_writes()
780 submit_bio_noacct(bio); in do_writes()
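do_writes() sorts the queued writes into three lists before touching any device: flush and discard bios always go to the sync list, and every other bio is classified by its region's state in the region hash. In-sync regions go to sync and are written to every leg via do_write(); regions currently being recovered go to recover and are delayed with dm_rh_delay(); out-of-sync regions go to nosync and are mapped to the default leg only. A classification sketch, with the enum values standing in for the kernel's DM_RH_* region states:

    #include <stdio.h>

    enum region_state { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };
    enum write_list   { LIST_SYNC, LIST_NOSYNC, LIST_RECOVER };

    static enum write_list classify_write(enum region_state st)
    {
        switch (st) {
        case RH_CLEAN:
        case RH_DIRTY:
            return LIST_SYNC;       /* in sync: mirror the write to all legs */
        case RH_NOSYNC:
            return LIST_NOSYNC;     /* out of sync: default leg only */
        case RH_RECOVERING:
        default:
            return LIST_RECOVER;    /* being recovered: delay via dm_rh_delay() */
        }
    }

    int main(void)
    {
        static const char *names[] = { "sync", "nosync", "recover" };
        enum region_state states[] = { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };

        for (unsigned int i = 0; i < sizeof(states) / sizeof(states[0]); i++)
            printf("state %u -> %s list\n", i, names[classify_write(states[i])]);
        return 0;
    }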
787 struct bio *bio; in do_failures() local
809 while ((bio = bio_list_pop(failures))) { in do_failures()
812 dm_rh_mark_nosync(ms->rh, bio); in do_failures()
829 bio_io_error(bio); in do_failures()
831 hold_bio(ms, bio); in do_failures()
833 bio_endio(bio); in do_failures()
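do_failures() drains ms->failures. While the dirty log is still usable, each failed write's region is first marked nosync with dm_rh_mark_nosync(); then one of three things happens to the bio: it is failed outright when no valid leg remains (or keep_log is set and the log has failed), it is held via hold_bio() for userspace to act on when error handling is requested without keep_log, or it is completed as if it had succeeded. A compact model of that three-way policy; the boolean parameters are stand-ins for the kernel's get_valid_mirror(), errors_handled() and keep_log() checks:

    #include <stdio.h>
    #include <stdbool.h>

    enum failure_action { FAIL_BIO, HOLD_BIO, COMPLETE_BIO };

    static enum failure_action failure_policy(bool any_valid_leg, bool log_failed,
                                              bool handle_errors, bool keep_log)
    {
        if (!any_valid_leg || (keep_log && log_failed))
            return FAIL_BIO;        /* bio_io_error(bio) */
        if (handle_errors && !keep_log)
            return HOLD_BIO;        /* hold_bio(ms, bio): wait for userspace */
        return COMPLETE_BIO;        /* bio_endio(bio): report success anyway */
    }

    int main(void)
    {
        const char *names[] = { "fail", "hold", "complete" };

        printf("%s\n", names[failure_policy(false, false, false, false)]);  /* fail */
        printf("%s\n", names[failure_policy(true,  false, true,  false)]);  /* hold */
        printf("%s\n", names[failure_policy(true,  true,  true,  true)]);   /* fail */
        printf("%s\n", names[failure_policy(true,  false, false, false)]);  /* complete */
        return 0;
    }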
1182 static int mirror_map(struct dm_target *ti, struct bio *bio) in mirror_map() argument
1184 int r, rw = bio_data_dir(bio); in mirror_map()
1189 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); in mirror_map()
1195 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); in mirror_map()
1196 queue_bio(ms, bio, rw); in mirror_map()
1200 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); in mirror_map()
1208 if (bio->bi_opf & REQ_RAHEAD) in mirror_map()
1211 queue_bio(ms, bio, rw); in mirror_map()
1219 m = choose_mirror(ms, bio->bi_iter.bi_sector); in mirror_map()
1223 dm_bio_record(&bio_record->details, bio); in mirror_map()
1226 map_bio(m, bio); in mirror_map()
1231 static int mirror_end_io(struct dm_target *ti, struct bio *bio, in mirror_end_io() argument
1234 int rw = bio_data_dir(bio); in mirror_end_io()
1239 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); in mirror_end_io()
1245 if (!(bio->bi_opf & REQ_PREFLUSH) && in mirror_end_io()
1246 bio_op(bio) != REQ_OP_DISCARD) in mirror_end_io()
1254 if (bio->bi_opf & REQ_RAHEAD) in mirror_end_io()
1279 if (default_ok(m) || mirror_available(ms, bio)) { in mirror_end_io()
1282 dm_bio_restore(bd, bio); in mirror_end_io()
1284 bio->bi_status = 0; in mirror_end_io()
1286 queue_bio(ms, bio, rw); in mirror_end_io()
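mirror_map() and mirror_end_io() cooperate on read retries through the per-bio data: before remapping a read inline, mirror_map() records the chosen mirror and the bio's original fields with dm_bio_record(); if the read later fails, mirror_end_io() restores the bio with dm_bio_restore(), clears bi_status, re-queues it to the worker and returns DM_ENDIO_INCOMPLETE so device-mapper does not complete it yet. A simplified model of that record/restore round trip; fake_bio and fake_bio_record are illustrative stand-ins, not struct bio or struct dm_raid1_bio_record:

    #include <stdio.h>

    struct fake_bio {
        unsigned long long sector;  /* gets remapped when sent to a leg */
        int dev;                    /* which underlying device it points at */
        int status;                 /* 0 = ok */
    };

    struct fake_bio_record {
        struct fake_bio saved;      /* snapshot taken before remapping */
        int have_snapshot;
    };

    static void record_bio(struct fake_bio_record *rec, const struct fake_bio *bio)
    {
        rec->saved = *bio;          /* plays the role of dm_bio_record() */
        rec->have_snapshot = 1;
    }

    static void restore_bio(const struct fake_bio_record *rec, struct fake_bio *bio)
    {
        *bio = rec->saved;          /* plays the role of dm_bio_restore() */
    }

    int main(void)
    {
        struct fake_bio bio = { .sector = 1536, .dev = -1, .status = 0 };
        struct fake_bio_record rec = { { 0, 0, 0 }, 0 };

        /* map path: snapshot the bio, then remap it onto leg 1 */
        record_bio(&rec, &bio);
        bio.dev = 1;
        bio.sector += 2048;

        /* the read fails on that leg ... */
        bio.status = -1;

        /* end_io path: restore the original bio and hand it back for a retry */
        if (rec.have_snapshot) {
            restore_bio(&rec, &bio);
            bio.status = 0;
            printf("retry: dev=%d sector=%llu status=%d\n",
                   bio.dev, bio.sector, bio.status);
        }
        return 0;
    }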
1304 struct bio *bio; in mirror_presuspend() local
1319 while ((bio = bio_list_pop(&holds))) in mirror_presuspend()
1320 hold_bio(ms, bio); in mirror_presuspend()