Lines Matching refs:dio

331 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
1191 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in submit_flush_bio() argument
1197 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in submit_flush_bio()
1212 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) in do_endio_flush() argument
1214 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in do_endio_flush()
1216 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) in do_endio_flush()
1217 submit_flush_bio(ic, dio); in do_endio_flush()
1222 static void dec_in_flight(struct dm_integrity_io *dio) in dec_in_flight() argument
1224 if (atomic_dec_and_test(&dio->in_flight)) { in dec_in_flight()
1225 struct dm_integrity_c *ic = dio->ic; in dec_in_flight()
1228 remove_range(ic, &dio->range); in dec_in_flight()
1230 if (unlikely(dio->write)) in dec_in_flight()
1233 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in dec_in_flight()
1235 if (unlikely(dio->bi_status) && !bio->bi_status) in dec_in_flight()
1236 bio->bi_status = dio->bi_status; in dec_in_flight()
1237 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { in dec_in_flight()
1238 dio->range.logical_sector += dio->range.n_sectors; in dec_in_flight()
1239 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); in dec_in_flight()
1240 INIT_WORK(&dio->work, integrity_bio_wait); in dec_in_flight()
1241 queue_work(ic->wait_wq, &dio->work); in dec_in_flight()
1244 do_endio_flush(ic, dio); in dec_in_flight()
1250 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); in integrity_end_io() local
1252 bio->bi_iter = dio->orig_bi_iter; in integrity_end_io()
1253 bio->bi_disk = dio->orig_bi_disk; in integrity_end_io()
1254 bio->bi_partno = dio->orig_bi_partno; in integrity_end_io()
1255 if (dio->orig_bi_integrity) { in integrity_end_io()
1256 bio->bi_integrity = dio->orig_bi_integrity; in integrity_end_io()
1259 bio->bi_end_io = dio->orig_bi_end_io; in integrity_end_io()
1261 if (dio->completion) in integrity_end_io()
1262 complete(dio->completion); in integrity_end_io()
1264 dec_in_flight(dio); in integrity_end_io()
1315 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); in integrity_metadata() local
1316 struct dm_integrity_c *ic = dio->ic; in integrity_metadata()
1324 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in integrity_metadata()
1328 unsigned sectors_to_process = dio->range.n_sectors; in integrity_metadata()
1329 sector_t sector = dio->range.logical_sector; in integrity_metadata()
1339 __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) { in integrity_metadata()
1356 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1357 checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE); in integrity_metadata()
1383 struct bio_integrity_payload *bip = dio->orig_bi_integrity; in integrity_metadata()
1388 unsigned data_to_process = dio->range.n_sectors; in integrity_metadata()
1399 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, in integrity_metadata()
1400 this_len, !dio->write ? TAG_READ : TAG_WRITE); in integrity_metadata()
1410 dec_in_flight(dio); in integrity_metadata()
1413 dio->bi_status = errno_to_blk_status(r); in integrity_metadata()
1414 dec_in_flight(dio); in integrity_metadata()
1420 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); in dm_integrity_map() local
1425 dio->ic = ic; in dm_integrity_map()
1426 dio->bi_status = 0; in dm_integrity_map()
1429 submit_flush_bio(ic, dio); in dm_integrity_map()
1433 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); in dm_integrity_map()
1434 dio->write = bio_op(bio) == REQ_OP_WRITE; in dm_integrity_map()
1435 dio->fua = dio->write && bio->bi_opf & REQ_FUA; in dm_integrity_map()
1436 if (unlikely(dio->fua)) { in dm_integrity_map()
1443 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { in dm_integrity_map()
1445 (unsigned long long)dio->range.logical_sector, bio_sectors(bio), in dm_integrity_map()
1449 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) { in dm_integrity_map()
1452 (unsigned long long)dio->range.logical_sector, bio_sectors(bio)); in dm_integrity_map()
1488 if (unlikely(ic->mode == 'R') && unlikely(dio->write)) in dm_integrity_map()
1491 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in dm_integrity_map()
1492 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in dm_integrity_map()
1495 dm_integrity_map_continue(dio, true); in dm_integrity_map()
1499 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, in __journal_read_write() argument
1502 struct dm_integrity_c *ic = dio->ic; in __journal_read_write()
1506 logical_sector = dio->range.logical_sector; in __journal_read_write()
1507 n_sectors = dio->range.n_sectors; in __journal_read_write()
1518 if (likely(dio->write)) in __journal_read_write()
1524 if (unlikely(!dio->write)) { in __journal_read_write()
1571 if (likely(dio->write)) in __journal_read_write()
1579 if (likely(dio->write)) in __journal_read_write()
1584 if (likely(dio->write)) { in __journal_read_write()
1620 if (unlikely(!dio->write)) in __journal_read_write()
1625 if (likely(dio->write)) { in __journal_read_write()
1635 remove_range(ic, &dio->range); in __journal_read_write()
1641 dio->range.logical_sector = logical_sector; in __journal_read_write()
1642 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); in __journal_read_write()
1643 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); in __journal_read_write()
1650 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map) in dm_integrity_map_continue() argument
1652 struct dm_integrity_c *ic = dio->ic; in dm_integrity_map_continue()
1653 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); in dm_integrity_map_continue()
1657 bool need_sync_io = ic->internal_hash && !dio->write; in dm_integrity_map_continue()
1660 INIT_WORK(&dio->work, integrity_bio_wait); in dm_integrity_map_continue()
1661 queue_work(ic->metadata_wq, &dio->work); in dm_integrity_map_continue()
1673 dio->range.n_sectors = bio_sectors(bio); in dm_integrity_map_continue()
1676 if (dio->write) { in dm_integrity_map_continue()
1680 dio->range.n_sectors = min(dio->range.n_sectors, in dm_integrity_map_continue()
1682 if (unlikely(!dio->range.n_sectors)) { in dm_integrity_map_continue()
1688 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; in dm_integrity_map_continue()
1706 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); in dm_integrity_map_continue()
1720 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); in dm_integrity_map_continue()
1726 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); in dm_integrity_map_continue()
1728 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector)) in dm_integrity_map_continue()
1729 dio->range.n_sectors = next_sector - dio->range.logical_sector; in dm_integrity_map_continue()
1733 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { in dm_integrity_map_continue()
1734 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) in dm_integrity_map_continue()
1737 dio->range.n_sectors = i; in dm_integrity_map_continue()
1741 if (unlikely(!add_new_range(ic, &dio->range, true))) { in dm_integrity_map_continue()
1750 INIT_WORK(&dio->work, integrity_bio_wait); in dm_integrity_map_continue()
1751 queue_work(ic->wait_wq, &dio->work); in dm_integrity_map_continue()
1754 wait_and_add_new_range(ic, &dio->range); in dm_integrity_map_continue()
1764 dio->in_flight = (atomic_t)ATOMIC_INIT(2); in dm_integrity_map_continue()
1768 dio->completion = &read_comp; in dm_integrity_map_continue()
1770 dio->completion = NULL; in dm_integrity_map_continue()
1772 dio->orig_bi_iter = bio->bi_iter; in dm_integrity_map_continue()
1774 dio->orig_bi_disk = bio->bi_disk; in dm_integrity_map_continue()
1775 dio->orig_bi_partno = bio->bi_partno; in dm_integrity_map_continue()
1778 dio->orig_bi_integrity = bio_integrity(bio); in dm_integrity_map_continue()
1782 dio->orig_bi_end_io = bio->bi_end_io; in dm_integrity_map_continue()
1785 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; in dm_integrity_map_continue()
1792 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector)) in dm_integrity_map_continue()
1795 integrity_metadata(&dio->work); in dm_integrity_map_continue()
1798 dec_in_flight(dio); in dm_integrity_map_continue()
1801 INIT_WORK(&dio->work, integrity_metadata); in dm_integrity_map_continue()
1802 queue_work(ic->metadata_wq, &dio->work); in dm_integrity_map_continue()
1808 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry))) in dm_integrity_map_continue()
1811 do_endio_flush(ic, dio); in dm_integrity_map_continue()
1817 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); in integrity_bio_wait() local
1819 dm_integrity_map_continue(dio, false); in integrity_bio_wait()