Lines matching refs:bio in drivers/md/dm-thin.c. Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument", "local" or "member" notes how the bio is declared on that line. Only lines containing "bio" appear, so multi-line statements may look truncated.
223 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
381 struct bio *parent_bio;
382 struct bio *bio; member
385 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent) in begin_discard()
392 op->bio = NULL; in begin_discard()
402 GFP_NOWAIT, 0, &op->bio); in issue_discard()
407 if (op->bio) { in end_discard()
412 bio_chain(op->bio, op->parent_bio); in end_discard()
413 bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0); in end_discard()
414 submit_bio(op->bio); in end_discard()
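The three discard helpers above form a small batching API: begin_discard() starts a block plug, issue_discard() accumulates sub-discards into op->bio, and end_discard() chains the result to the parent bio so the parent only completes once every sub-discard has. A sketch reconstructed from these fragments and the surrounding dm-thin.c context (block_to_sectors() and the error handling in end_discard() are filled in from that context and may differ between kernel versions):

    struct discard_op {
            struct thin_c *tc;
            struct blk_plug plug;
            struct bio *parent_bio;
            struct bio *bio;
    };

    static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
    {
            BUG_ON(!parent);

            op->tc = tc;
            blk_start_plug(&op->plug);
            op->parent_bio = parent;
            op->bio = NULL;
    }

    static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
    {
            struct thin_c *tc = op->tc;
            sector_t s = block_to_sectors(tc->pool, data_b);
            sector_t len = block_to_sectors(tc->pool, data_e - data_b);

            /* Accumulate sub-discards; op->bio grows into a chain. */
            return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
                                          GFP_NOWAIT, 0, &op->bio);
    }

    static void end_discard(struct discard_op *op, int r)
    {
            if (op->bio) {
                    /*
                     * Even if an issue_discard() call failed, sub-discards
                     * may be in flight, so chain to the parent and submit.
                     */
                    bio_chain(op->bio, op->parent_bio);
                    bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
                    submit_bio(op->bio);
            }

            blk_finish_plug(&op->plug);

            if (r && !op->parent_bio->bi_status)
                    op->parent_bio->bi_status = errno_to_blk_status(r);
            bio_endio(op->parent_bio);
    }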
441 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
453 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
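bio_detain() is a thin wrapper that pre-allocates a prison cell before calling into the bio prison, so the detain itself cannot fail on allocation. The surrounding lines, reconstructed (comments paraphrased):

    static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
                          struct dm_bio_prison_cell **cell_result)
    {
            int r;
            struct dm_bio_prison_cell *cell_prealloc;

            /*
             * Allocate a cell from the prison's mempool; this might
             * block but it can't fail.
             */
            cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

            r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
            if (r)
                    /* An existing cell was reused; free the spare. */
                    dm_bio_prison_free_cell(pool->prison, cell_prealloc);

            return r;
    }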
600 struct bio *bio; in error_bio_list() local
602 while ((bio = bio_list_pop(bios))) { in error_bio_list()
603 bio->bi_status = error; in error_bio_list()
604 bio_endio(bio); in error_bio_list()
678 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) in get_bio_block() argument
681 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
694 static void get_bio_block_range(struct thin_c *tc, struct bio *bio, in get_bio_block_range() argument
698 sector_t b = bio->bi_iter.bi_sector; in get_bio_block_range()
699 sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT); in get_bio_block_range()
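Both helpers convert bi_sector into pool block numbers, with a shift fast path when the pool block size is a power of two. A sketch of get_bio_block(); get_bio_block_range() applies the same arithmetic but rounds begin up and end down, so it returns only the blocks the bio covers completely (which is what discards need):

    static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
    {
            struct pool *pool = tc->pool;
            sector_t block_nr = bio->bi_iter.bi_sector;

            if (block_size_is_power_of_two(pool))
                    block_nr >>= pool->sectors_per_block_shift;
            else
                    (void) sector_div(block_nr, pool->sectors_per_block);

            return block_nr;
    }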
719 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) in remap() argument
722 sector_t bi_sector = bio->bi_iter.bi_sector; in remap()
724 bio_set_dev(bio, tc->pool_dev->bdev); in remap()
726 bio->bi_iter.bi_sector = in remap()
730 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
734 static void remap_to_origin(struct thin_c *tc, struct bio *bio) in remap_to_origin() argument
736 bio_set_dev(bio, tc->origin_dev->bdev); in remap_to_origin()
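remap() points the bio at the pool's data device and rewrites bi_sector from the virtual offset to the mapped data block; the two assignments on lines 726 and 730 are the power-of-two and generic branches. Reconstructed:

    static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
    {
            struct pool *pool = tc->pool;
            sector_t bi_sector = bio->bi_iter.bi_sector;

            bio_set_dev(bio, tc->pool_dev->bdev);
            if (block_size_is_power_of_two(pool))
                    bio->bi_iter.bi_sector =
                            (block << pool->sectors_per_block_shift) |
                            (bi_sector & (pool->sectors_per_block - 1));
            else
                    bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
                                    sector_div(bi_sector, pool->sectors_per_block);
    }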
739 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) in bio_triggers_commit() argument
741 return op_is_flush(bio->bi_opf) && in bio_triggers_commit()
745 static void inc_all_io_entry(struct pool *pool, struct bio *bio) in inc_all_io_entry() argument
749 if (bio_op(bio) == REQ_OP_DISCARD) in inc_all_io_entry()
752 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in inc_all_io_entry()
756 static void issue(struct thin_c *tc, struct bio *bio) in issue() argument
761 if (!bio_triggers_commit(tc, bio)) { in issue()
762 generic_make_request(bio); in issue()
772 bio_io_error(bio); in issue()
781 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
785 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) in remap_to_origin_and_issue() argument
787 remap_to_origin(tc, bio); in remap_to_origin_and_issue()
788 issue(tc, bio); in remap_to_origin_and_issue()
791 static void remap_and_issue(struct thin_c *tc, struct bio *bio, in remap_and_issue() argument
794 remap(tc, bio, block); in remap_and_issue()
795 issue(tc, bio); in remap_and_issue()
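issue() submits most bios directly; bios that trigger a metadata commit (line 781) are instead parked on pool->deferred_flush_bios so the worker can batch a single commit for all of them in process_deferred_bios(). A reconstruction (locking shown as irqsave; newer kernels use spin_lock_irq here):

    static void issue(struct thin_c *tc, struct bio *bio)
    {
            struct pool *pool = tc->pool;
            unsigned long flags;

            if (!bio_triggers_commit(tc, bio)) {
                    generic_make_request(bio);
                    return;
            }

            /*
             * Complete the bio with an error if earlier I/O caused
             * metadata changes that can't be committed, e.g. due to
             * I/O errors on the metadata device.
             */
            if (dm_thin_aborted_changes(tc->td)) {
                    bio_io_error(bio);
                    return;
            }

            /*
             * Batch bios that trigger commits; the worker issues one
             * commit for the whole batch.
             */
            spin_lock_irqsave(&pool->lock, flags);
            bio_list_add(&pool->deferred_flush_bios, bio);
            spin_unlock_irqrestore(&pool->lock, flags);
    }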
828 struct bio *bio; member
860 static void overwrite_endio(struct bio *bio) in overwrite_endio() argument
862 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in overwrite_endio()
865 bio->bi_end_io = m->saved_bi_end_io; in overwrite_endio()
867 m->status = bio->bi_status; in overwrite_endio()
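overwrite_endio() is the completion installed via save_and_set_endio() (line 1310 below) while a whole-block overwrite stands in for a block copy or zeroing. On completion it restores the saved end_io and records the bio's status on the mapping. Reconstructed:

    static void overwrite_endio(struct bio *bio)
    {
            struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
            struct dm_thin_new_mapping *m = h->overwrite_mapping;

            bio->bi_end_io = m->saved_bi_end_io;

            m->status = bio->bi_status;
            complete_mapping_preparation(m);
    }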
897 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
909 struct bio *bio; in __inc_remap_and_issue_cell() local
911 while ((bio = bio_list_pop(&cell->bios))) { in __inc_remap_and_issue_cell()
912 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) in __inc_remap_and_issue_cell()
913 bio_list_add(&info->defer_bios, bio); in __inc_remap_and_issue_cell()
915 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
922 bio_list_add(&info->issue_bios, bio); in __inc_remap_and_issue_cell()
931 struct bio *bio; in inc_remap_and_issue_cell() local
946 while ((bio = bio_list_pop(&info.defer_bios))) in inc_remap_and_issue_cell()
947 thin_defer_bio(tc, bio); in inc_remap_and_issue_cell()
949 while ((bio = bio_list_pop(&info.issue_bios))) in inc_remap_and_issue_cell()
950 remap_and_issue(info.tc, bio, block); in inc_remap_and_issue_cell()
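The split into defer_bios and issue_bios exists because __inc_remap_and_issue_cell() runs as a dm_cell_visit_release() callback with the bio prison lock held: nothing can be submitted there, so bios are sorted into the two lists and drained afterwards (lines 946-950). Reconstructed:

    struct remap_info {
            struct thin_c *tc;
            struct bio_list defer_bios;
            struct bio_list issue_bios;
    };

    static void __inc_remap_and_issue_cell(void *context,
                                           struct dm_bio_prison_cell *cell)
    {
            struct remap_info *info = context;
            struct bio *bio;

            while ((bio = bio_list_pop(&cell->bios))) {
                    if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
                            bio_list_add(&info->defer_bios, bio);
                    else {
                            inc_all_io_entry(info->tc->pool, bio);

                            /*
                             * We can't issue the bios with the prison
                             * lock held, so queue them for after the
                             * visit completes.
                             */
                            bio_list_add(&info->issue_bios, bio);
                    }
            }
    }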
960 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) in complete_overwrite_bio() argument
969 if (!bio_triggers_commit(tc, bio)) { in complete_overwrite_bio()
970 bio_endio(bio); in complete_overwrite_bio()
980 bio_io_error(bio); in complete_overwrite_bio()
989 bio_list_add(&pool->deferred_flush_completions, bio); in complete_overwrite_bio()
997 struct bio *bio = m->bio; in process_prepared_mapping() local
1023 if (bio) { in process_prepared_mapping()
1025 complete_overwrite_bio(tc, bio); in process_prepared_mapping()
1049 bio_io_error(m->bio); in process_prepared_discard_fail()
1055 bio_endio(m->bio); in process_prepared_discard_success()
1067 bio_io_error(m->bio); in process_prepared_discard_no_passdown()
1069 bio_endio(m->bio); in process_prepared_discard_no_passdown()
1078 struct bio *discard_parent) in passdown_double_checking_shared_status()
1137 static void passdown_endio(struct bio *bio) in passdown_endio() argument
1143 queue_passdown_pt2(bio->bi_private); in passdown_endio()
1144 bio_put(bio); in passdown_endio()
1152 struct bio *discard_parent; in process_prepared_discard_passdown_pt1()
1163 bio_io_error(m->bio); in process_prepared_discard_passdown_pt1()
1176 bio_io_error(m->bio); in process_prepared_discard_passdown_pt1()
1218 bio_io_error(m->bio); in process_prepared_discard_passdown_pt2()
1220 bio_endio(m->bio); in process_prepared_discard_passdown_pt2()
1245 static int io_overlaps_block(struct pool *pool, struct bio *bio) in io_overlaps_block() argument
1247 return bio->bi_iter.bi_size == in io_overlaps_block()
1251 static int io_overwrites_block(struct pool *pool, struct bio *bio) in io_overwrites_block() argument
1253 return (bio_data_dir(bio) == WRITE) && in io_overwrites_block()
1254 io_overlaps_block(pool, bio); in io_overwrites_block()
1257 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save, in save_and_set_endio() argument
1260 *save = bio->bi_end_io; in save_and_set_endio()
1261 bio->bi_end_io = fn; in save_and_set_endio()
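These three helpers gate the overwrite optimisation: a WRITE whose size is exactly one pool block can stand in for a block copy or zeroing outright, with its end_io temporarily swapped for overwrite_endio(). Reconstructed from the fragments:

    static int io_overlaps_block(struct pool *pool, struct bio *bio)
    {
            return bio->bi_iter.bi_size ==
                    (pool->sectors_per_block << SECTOR_SHIFT);
    }

    static int io_overwrites_block(struct pool *pool, struct bio *bio)
    {
            return (bio_data_dir(bio) == WRITE) &&
                    io_overlaps_block(pool, bio);
    }

    static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
                                   bio_end_io_t *fn)
    {
            *save = bio->bi_end_io;
            bio->bi_end_io = fn;
    }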
1282 m->bio = NULL; in get_next_mapping()
1301 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, in remap_and_issue_overwrite() argument
1306 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in remap_and_issue_overwrite()
1309 m->bio = bio; in remap_and_issue_overwrite()
1310 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); in remap_and_issue_overwrite()
1311 inc_all_io_entry(pool, bio); in remap_and_issue_overwrite()
1312 remap_and_issue(tc, bio, data_begin); in remap_and_issue_overwrite()
1321 struct dm_bio_prison_cell *cell, struct bio *bio, in schedule_copy() argument
1349 if (io_overwrites_block(pool, bio)) in schedule_copy()
1350 remap_and_issue_overwrite(tc, bio, data_dest, m); in schedule_copy()
1381 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_internal_copy() argument
1384 data_origin, data_dest, cell, bio, in schedule_internal_copy()
1390 struct bio *bio) in schedule_zero() argument
1408 if (io_overwrites_block(pool, bio)) in schedule_zero()
1409 remap_and_issue_overwrite(tc, bio, data_block, m); in schedule_zero()
1419 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_external_copy() argument
1427 virt_block, data_dest, cell, bio, in schedule_external_copy()
1432 virt_block, data_dest, cell, bio, in schedule_external_copy()
1436 schedule_zero(tc, virt_block, data_dest, cell, bio); in schedule_external_copy()
1592 static void retry_on_resume(struct bio *bio) in retry_on_resume() argument
1594 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in retry_on_resume()
1599 bio_list_add(&tc->retry_on_resume_list, bio); in retry_on_resume()
1627 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) in handle_unserviceable_bio() argument
1632 bio->bi_status = error; in handle_unserviceable_bio()
1633 bio_endio(bio); in handle_unserviceable_bio()
1635 retry_on_resume(bio); in handle_unserviceable_bio()
1640 struct bio *bio; in retry_bios_on_resume() local
1653 while ((bio = bio_list_pop(&bios))) in retry_bios_on_resume()
1654 retry_on_resume(bio); in retry_bios_on_resume()
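When the pool can't service a bio (out of data space, read-only, failed), handle_unserviceable_bio() either errors it or parks it on the thin device's retry_on_resume_list to be replayed after a resume. A sketch; should_error_unserviceable_bio() is the dm-thin.c helper that maps the pool mode to a blk_status_t, honouring the error_if_no_space feature (locking again shown as irqsave):

    static void retry_on_resume(struct bio *bio)
    {
            struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
            struct thin_c *tc = h->tc;
            unsigned long flags;

            spin_lock_irqsave(&tc->lock, flags);
            bio_list_add(&tc->retry_on_resume_list, bio);
            spin_unlock_irqrestore(&tc->lock, flags);
    }

    static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
    {
            blk_status_t error = should_error_unserviceable_bio(pool);

            if (error) {
                    bio->bi_status = error;
                    bio_endio(bio);
            } else
                    retry_on_resume(bio);
    }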
1671 m->bio = virt_cell->holder; in process_discard_cell_no_passdown()
1678 struct bio *bio) in break_up_discard_bio() argument
1722 m->bio = bio; in break_up_discard_bio()
1732 bio_inc_remaining(bio); in break_up_discard_bio()
1742 struct bio *bio = virt_cell->holder; in process_discard_cell_passdown() local
1743 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in process_discard_cell_passdown()
1751 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio); in process_discard_cell_passdown()
1758 bio_endio(bio); in process_discard_cell_passdown()
1761 static void process_discard_bio(struct thin_c *tc, struct bio *bio) in process_discard_bio() argument
1767 get_bio_block_range(tc, bio, &begin, &end); in process_discard_bio()
1772 bio_endio(bio); in process_discard_bio()
1777 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) in process_discard_bio()
1790 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, in break_sharing() argument
1803 data_block, cell, bio); in break_sharing()
1822 struct bio *bio; in __remap_and_issue_shared_cell() local
1824 while ((bio = bio_list_pop(&cell->bios))) { in __remap_and_issue_shared_cell()
1825 if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) || in __remap_and_issue_shared_cell()
1826 bio_op(bio) == REQ_OP_DISCARD) in __remap_and_issue_shared_cell()
1827 bio_list_add(&info->defer_bios, bio); in __remap_and_issue_shared_cell()
1829 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in __remap_and_issue_shared_cell()
1832 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1833 bio_list_add(&info->issue_bios, bio); in __remap_and_issue_shared_cell()
1842 struct bio *bio; in remap_and_issue_shared_cell() local
1852 while ((bio = bio_list_pop(&info.defer_bios))) in remap_and_issue_shared_cell()
1853 thin_defer_bio(tc, bio); in remap_and_issue_shared_cell()
1855 while ((bio = bio_list_pop(&info.issue_bios))) in remap_and_issue_shared_cell()
1856 remap_and_issue(tc, bio, block); in remap_and_issue_shared_cell()
1859 static void process_shared_bio(struct thin_c *tc, struct bio *bio, in process_shared_bio() argument
1873 if (bio_detain(pool, &key, bio, &data_cell)) { in process_shared_bio()
1878 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) { in process_shared_bio()
1879 break_sharing(tc, bio, block, &key, lookup_result, data_cell); in process_shared_bio()
1882 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in process_shared_bio()
1885 inc_all_io_entry(pool, bio); in process_shared_bio()
1886 remap_and_issue(tc, bio, lookup_result->block); in process_shared_bio()
1893 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, in provision_block() argument
1903 if (!bio->bi_iter.bi_size) { in provision_block()
1904 inc_all_io_entry(pool, bio); in provision_block()
1907 remap_and_issue(tc, bio, 0); in provision_block()
1914 if (bio_data_dir(bio) == READ) { in provision_block()
1915 zero_fill_bio(bio); in provision_block()
1917 bio_endio(bio); in provision_block()
1925 schedule_external_copy(tc, block, data_block, cell, bio); in provision_block()
1927 schedule_zero(tc, block, data_block, cell, bio); in provision_block()
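provision_block() short-circuits two cases before allocating anything: an empty bio (e.g. a flush) is remapped immediately without provisioning, and a READ of an unprovisioned block is simply zero-filled and completed. The relevant excerpt, with the allocation path elided (cell_defer_no_holder() is the dm-thin.c helper that releases the prison cell):

    /*
     * Remap empty bios (flushes) immediately, without provisioning.
     */
    if (!bio->bi_iter.bi_size) {
            inc_all_io_entry(pool, bio);
            cell_defer_no_holder(tc, cell);

            remap_and_issue(tc, bio, 0);
            return;
    }

    /*
     * Fill read bios with zeroes and complete them immediately.
     */
    if (bio_data_dir(bio) == READ) {
            zero_fill_bio(bio);
            cell_defer_no_holder(tc, cell);
            bio_endio(bio);
            return;
    }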
1946 struct bio *bio = cell->holder; in process_cell() local
1947 dm_block_t block = get_bio_block(tc, bio); in process_cell()
1959 process_shared_bio(tc, bio, block, &lookup_result, cell); in process_cell()
1961 inc_all_io_entry(pool, bio); in process_cell()
1962 remap_and_issue(tc, bio, lookup_result.block); in process_cell()
1968 if (bio_data_dir(bio) == READ && tc->origin_dev) { in process_cell()
1969 inc_all_io_entry(pool, bio); in process_cell()
1972 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
1973 remap_to_origin_and_issue(tc, bio); in process_cell()
1975 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1976 zero_fill_bio(bio); in process_cell()
1977 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
1978 remap_to_origin_and_issue(tc, bio); in process_cell()
1981 zero_fill_bio(bio); in process_cell()
1982 bio_endio(bio); in process_cell()
1985 provision_block(tc, bio, block, cell); in process_cell()
1992 bio_io_error(bio); in process_cell()
1997 static void process_bio(struct thin_c *tc, struct bio *bio) in process_bio() argument
2000 dm_block_t block = get_bio_block(tc, bio); in process_bio()
2009 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
2015 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, in __process_bio_read_only() argument
2019 int rw = bio_data_dir(bio); in __process_bio_read_only()
2020 dm_block_t block = get_bio_block(tc, bio); in __process_bio_read_only()
2026 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) { in __process_bio_read_only()
2027 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2031 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2032 remap_and_issue(tc, bio, lookup_result.block); in __process_bio_read_only()
2042 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2047 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2048 remap_to_origin_and_issue(tc, bio); in __process_bio_read_only()
2052 zero_fill_bio(bio); in __process_bio_read_only()
2053 bio_endio(bio); in __process_bio_read_only()
2061 bio_io_error(bio); in __process_bio_read_only()
2066 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) in process_bio_read_only() argument
2068 __process_bio_read_only(tc, bio, NULL); in process_bio_read_only()
2076 static void process_bio_success(struct thin_c *tc, struct bio *bio) in process_bio_success() argument
2078 bio_endio(bio); in process_bio_success()
2081 static void process_bio_fail(struct thin_c *tc, struct bio *bio) in process_bio_fail() argument
2083 bio_io_error(bio); in process_bio_fail()
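process_bio_fail() and process_bio_success() exist because the pool swaps its process_bio/process_discard function pointers (the process_bio_fn typedef on line 223) as its mode changes. Roughly how set_pool_mode() wires them (excerpt; exact cases vary by kernel version):

    switch (new_mode) {
    case PM_FAIL:
            pool->process_bio = process_bio_fail;
            pool->process_discard = process_bio_fail;
            break;

    case PM_READ_ONLY:
            pool->process_bio = process_bio_read_only;
            pool->process_discard = process_bio_success;
            break;

    case PM_WRITE:
            pool->process_bio = process_bio;
            pool->process_discard = process_discard_bio;
            break;

    /* PM_OUT_OF_DATA_SPACE case elided */
    }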
2109 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) in __thin_bio_rb_add() argument
2113 sector_t bi_sector = bio->bi_iter.bi_sector; in __thin_bio_rb_add()
2127 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in __thin_bio_rb_add()
2136 struct bio *bio; in __extract_sorted_bios() local
2140 bio = thin_bio(pbd); in __extract_sorted_bios()
2142 bio_list_add(&tc->deferred_bio_list, bio); in __extract_sorted_bios()
2151 struct bio *bio; in __sort_thin_deferred_bios() local
2159 while ((bio = bio_list_pop(&bios))) in __sort_thin_deferred_bios()
2160 __thin_bio_rb_add(tc, bio); in __sort_thin_deferred_bios()
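Deferred bios are sorted by bi_sector through a per-thin rb-tree before submission, so the data device sees mostly sequential I/O. The flow, reconstructed:

    static void __sort_thin_deferred_bios(struct thin_c *tc)
    {
            struct bio *bio;
            struct bio_list bios;

            bio_list_init(&bios);
            bio_list_merge(&bios, &tc->deferred_bio_list);
            bio_list_init(&tc->deferred_bio_list);

            /* Insert every deferred bio into the rb-tree, keyed on bi_sector. */
            while ((bio = bio_list_pop(&bios)))
                    __thin_bio_rb_add(tc, bio);

            /*
             * Transfer the sorted bios back to deferred_bio_list,
             * allowing lockless submission afterwards.
             */
            __extract_sorted_bios(tc);
    }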
2174 struct bio *bio; in process_thin_deferred_bios() local
2202 while ((bio = bio_list_pop(&bios))) { in process_thin_deferred_bios()
2210 bio_list_add(&tc->deferred_bio_list, bio); in process_thin_deferred_bios()
2216 if (bio_op(bio) == REQ_OP_DISCARD) in process_thin_deferred_bios()
2217 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2219 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2353 struct bio *bio; in process_deferred_bios() local
2386 while ((bio = bio_list_pop(&bios))) in process_deferred_bios()
2387 bio_io_error(bio); in process_deferred_bios()
2392 while ((bio = bio_list_pop(&bio_completions))) in process_deferred_bios()
2393 bio_endio(bio); in process_deferred_bios()
2395 while ((bio = bio_list_pop(&bios))) in process_deferred_bios()
2396 generic_make_request(bio); in process_deferred_bios()
2658 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) in thin_defer_bio() argument
2664 bio_list_add(&tc->deferred_bio_list, bio); in thin_defer_bio()
2670 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) in thin_defer_bio_with_throttle() argument
2675 thin_defer_bio(tc, bio); in thin_defer_bio_with_throttle()
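thin_defer_bio() hands a bio to the pool's worker thread; the throttled variant brackets it with the pool throttle, which slows producers down when the worker is falling behind. A sketch (wake_worker() and the throttle helpers are per dm-thin.c; locking shown as irqsave):

    static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
    {
            unsigned long flags;
            struct pool *pool = tc->pool;

            spin_lock_irqsave(&tc->lock, flags);
            bio_list_add(&tc->deferred_bio_list, bio);
            spin_unlock_irqrestore(&tc->lock, flags);

            wake_worker(pool);
    }

    static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
    {
            struct pool *pool = tc->pool;

            throttle_lock(&pool->throttle);
            thin_defer_bio(tc, bio);
            throttle_unlock(&pool->throttle);
    }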
2693 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) in thin_hook_bio() argument
2695 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in thin_hook_bio()
2707 static int thin_bio_map(struct dm_target *ti, struct bio *bio) in thin_bio_map() argument
2711 dm_block_t block = get_bio_block(tc, bio); in thin_bio_map()
2717 thin_hook_bio(tc, bio); in thin_bio_map()
2720 bio->bi_status = BLK_STS_DM_REQUEUE; in thin_bio_map()
2721 bio_endio(bio); in thin_bio_map()
2726 bio_io_error(bio); in thin_bio_map()
2730 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) { in thin_bio_map()
2731 thin_defer_bio_with_throttle(tc, bio); in thin_bio_map()
2740 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2770 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2775 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2779 remap(tc, bio, result.block); in thin_bio_map()
2793 bio_io_error(bio); in thin_bio_map()
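The early returns in thin_bio_map() (lines 2717-2731) handle the cases that never reach the mapping lookup: requeue mode, a failed pool, and flush/discard bios, which always go through the worker. Excerpt, reconstructed from the fragments:

    thin_hook_bio(tc, bio);

    if (tc->requeue_mode) {
            bio->bi_status = BLK_STS_DM_REQUEUE;
            bio_endio(bio);
            return DM_MAPIO_SUBMITTED;
    }

    if (get_pool_mode(tc->pool) == PM_FAIL) {
            bio_io_error(bio);
            return DM_MAPIO_SUBMITTED;
    }

    if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
            thin_defer_bio_with_throttle(tc, bio);
            return DM_MAPIO_SUBMITTED;
    }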
3410 static int pool_map(struct dm_target *ti, struct bio *bio) in pool_map() argument
3421 bio_set_dev(bio, pt->data_dev->bdev); in pool_map()
4290 static int thin_map(struct dm_target *ti, struct bio *bio) in thin_map() argument
4292 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); in thin_map()
4294 return thin_bio_map(ti, bio); in thin_map()
4297 static int thin_endio(struct dm_target *ti, struct bio *bio, in thin_endio() argument
4301 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in thin_endio()