Lines matching refs: SECTOR_SHIFT (drivers/md/dm-integrity.c)
108 #define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
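
For reference, SECTOR_SHIFT is 9 in the kernel, so 1 << SECTOR_SHIFT is the 512-byte sector size and each journal sector carries 512 bytes minus room for a commit id. A minimal user-space sketch of that arithmetic (commit_id_t is a stand-in here for the driver's __le64 commit id):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9                /* 512-byte sectors */
    typedef uint64_t commit_id_t;         /* stand-in for dm-integrity's __le64 commit id */
    #define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))

    int main(void)
    {
        /* 512 - 8 = 504 payload bytes per journal sector */
        printf("journal sector payload: %zu bytes\n", (size_t)JOURNAL_SECTOR_DATA);
        return 0;
    }
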
429 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); in get_metadata_sector_and_offset()
430 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); in get_metadata_sector_and_offset()
432 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); in get_metadata_sector_and_offset()
433 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); in get_metadata_sector_and_offset()
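
These two branches in get_metadata_sector_and_offset() turn a tag index into a metadata sector plus a byte offset inside a buffer of 1 << (SECTOR_SHIFT + log2_buffer_sectors) bytes: the fast path shifts when the tag size is a power of two, the slow path multiplies. A hedged user-space sketch of the same split (the struct and the area_start parameter are illustrative stand-ins, not the driver's types):

    #include <stdint.h>

    #define SECTOR_SHIFT 9

    struct example_ic {
        unsigned tag_size;            /* bytes of tag per block */
        int      log2_tag_size;       /* >= 0 only when tag_size is a power of two */
        unsigned log2_buffer_sectors; /* buffer = 1 << (SECTOR_SHIFT + this) bytes */
    };

    /* Tag index -> (metadata sector, byte offset within that buffer). */
    static void metadata_sector_and_offset(const struct example_ic *ic, uint64_t area_start,
                                           uint64_t offset, uint64_t *ms, unsigned *mo)
    {
        *ms = area_start;
        if (ic->log2_tag_size >= 0) {
            *ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
            *mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        } else {
            *ms += (uint64_t)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
            *mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        }
    }
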
650 *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); in page_list_location()
651 *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); in page_list_location()
663 *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT; in access_page_list()
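
The pattern in page_list_location() and access_page_list() maps a 512-byte sector onto an array of pages: PAGE_SHIFT - SECTOR_SHIFT sectors fit per page, so the page index is a right shift and the byte offset is a mask. A self-contained sketch assuming 4 KiB pages:

    #include <stdint.h>

    #define SECTOR_SHIFT 9
    #define PAGE_SHIFT   12              /* assumption: 4 KiB pages */
    #define PAGE_SIZE    (1UL << PAGE_SHIFT)

    /* Locate a sector inside a page list: which page, where in that page, and
     * how many sectors remain before the end of the page. */
    static void sector_to_page_location(uint64_t sector, unsigned *pl_index,
                                        unsigned *pl_offset, unsigned *n_sectors)
    {
        *pl_index  = sector >> (PAGE_SHIFT - SECTOR_SHIFT);      /* sector / 8 */
        *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); /* byte offset in that page */
        *n_sectors = (PAGE_SIZE - *pl_offset) >> SECTOR_SHIFT;   /* sectors to the page end */
    }
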
786 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; in xor_journal()
949 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); in rw_journal_sectors()
950 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); in rw_journal_sectors()
1070 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); in copy_from_journal()
1071 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); in copy_from_journal()
1311 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); in dm_integrity_rw_tag()
1336 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { in dm_integrity_rw_tag()
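
In dm_integrity_rw_tag() the tag copy is clamped to what remains of the current metadata buffer (1U << SECTOR_SHIFT << log2_buffer_sectors bytes), and when the offset reaches that size the cursor steps to the next buffer. A sketch of that boundary handling, with illustrative parameter names:

    #include <stdint.h>

    #define SECTOR_SHIFT 9

    /* Advance a (block, offset) cursor over metadata buffers; returns how many
     * bytes of the request fit into the current buffer. */
    static unsigned tag_copy_chunk(unsigned log2_buffer_sectors, uint64_t *metadata_block,
                                   unsigned *metadata_offset, unsigned total_size)
    {
        unsigned buffer_bytes = 1U << SECTOR_SHIFT << log2_buffer_sectors;
        unsigned to_copy = buffer_bytes - *metadata_offset;

        if (to_copy > total_size)
            to_copy = total_size;

        *metadata_offset += to_copy;
        if (*metadata_offset == buffer_bytes) {   /* wrapped: move to the next buffer */
            *metadata_offset = 0;
            (*metadata_block)++;
        }
        return to_copy;
    }
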
1435 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); in dec_in_flight()
1485 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT); in integrity_sector_checksum()
1529 …checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size +… in integrity_metadata()
1552 pos += ic->sectors_per_block << SECTOR_SHIFT; in integrity_metadata()
1661 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { in dm_integrity_map()
1714 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors)) in __journal_read_write()
1715 bv.bv_len = n_sectors << SECTOR_SHIFT; in __journal_read_write()
1716 n_sectors -= bv.bv_len >> SECTOR_SHIFT; in __journal_read_write()
1747 mem_ptr += 1 << SECTOR_SHIFT; in __journal_read_write()
1791 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); in __journal_read_write()
1819 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; in __journal_read_write()
1820 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); in __journal_read_write()
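
The __journal_read_write() lines above step through a bio vector one integrity block at a time: the stride is sectors_per_block << SECTOR_SHIFT bytes, applied to bv.bv_offset and bv.bv_len on every pass. A simplified sketch of that stride over a plain buffer (it ignores the journal-sector layout and assumes len is a nonzero multiple of the block size):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define SECTOR_SHIFT 9

    /* Copy 'len' bytes in strides of sectors_per_block << SECTOR_SHIFT bytes. */
    static void copy_in_block_strides(uint8_t *dst, const uint8_t *src, size_t len,
                                      unsigned sectors_per_block)
    {
        size_t block_bytes = (size_t)sectors_per_block << SECTOR_SHIFT;

        do {
            memcpy(dst, src, block_bytes);
            dst += block_bytes;
            src += block_bytes;
            /* mirrors: bv.bv_offset += block_bytes; } while (bv.bv_len -= block_bytes); */
        } while (len -= block_bytes);
    }
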
2016 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; in dm_integrity_map_continue()
2445 integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t); in integrity_recalc()
2511 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), in bitmap_block_work()
2512 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL); in bitmap_block_work()
2559 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in bitmap_flush_work()
2848 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
2866 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
2898 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); in dm_integrity_resume()
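
All of the bitmap call sites convert the bitmap block size from bytes to sectors with a plain right shift, and multiply by a block index to get a sector offset. A tiny sketch (BITMAP_BLOCK_SIZE is assumed here to be 4096, one page per bitmap block):

    #include <stdint.h>

    #define SECTOR_SHIFT      9
    #define BITMAP_BLOCK_SIZE 4096   /* assumption: one 4 KiB page per bitmap block */

    /* Sector offset of bitmap block 'idx' within the bitmap area. */
    static uint64_t bitmap_block_sector(unsigned idx)
    {
        return (uint64_t)idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);  /* idx * 8 sectors */
    }
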
2963 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_status()
3010 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
3011 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; in dm_integrity_io_hints()
3012 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); in dm_integrity_io_hints()
3046 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT; in calculate_device_limits()
3058 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) in calculate_device_limits()
3059 >> (ic->log2_buffer_sectors + SECTOR_SHIFT); in calculate_device_limits()
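
calculate_device_limits() rounds the metadata size (a byte count, per the surrounding code) up to whole buffers of 1 << (log2_buffer_sectors + SECTOR_SHIFT) bytes; the add-mask-then-shift above is the usual round-up-and-divide idiom. A sketch of that step in isolation:

    #include <stdint.h>

    #define SECTOR_SHIFT 9

    /* Round 'bytes' up to whole metadata buffers and return the buffer count. */
    static uint64_t bytes_to_buffers(uint64_t bytes, unsigned log2_buffer_sectors)
    {
        unsigned shift = log2_buffer_sectors + SECTOR_SHIFT;

        return (bytes + ((1ULL << shift) - 1)) >> shift;
    }
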
3076 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); in initialize_superblock()
3148 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; in dm_integrity_set()
3236 end = end_offset + (1 << SECTOR_SHIFT); in dm_integrity_alloc_journal_scatterlist()
3330 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT); in create_journal()
3515 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT; in create_journal()
3695 if (val < 1 << SECTOR_SHIFT || in dm_integrity_ctr()
3696 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT || in dm_integrity_ctr()
3702 ic->sectors_per_block = val >> SECTOR_SHIFT; in dm_integrity_ctr()
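
The block_size: argument check in dm_integrity_ctr() requires a value between one sector (1 << SECTOR_SHIFT bytes) and MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT bytes before dividing down to sectors_per_block. A sketch of that validation (MAX_SECTORS_PER_BLOCK is assumed to be 8, i.e. a 4 KiB ceiling, and the power-of-two test mirrors the driver's adjacent check, which is not shown in this listing):

    #include <stdbool.h>

    #define SECTOR_SHIFT          9
    #define MAX_SECTORS_PER_BLOCK 8    /* assumption: 4 KiB maximum block size */

    /* Validate a block_size given in bytes and convert it to sectors per block. */
    static bool parse_block_size(unsigned val, unsigned *sectors_per_block)
    {
        if (val < (1U << SECTOR_SHIFT) ||
            val > (MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT) ||
            (val & (val - 1)))          /* must be a power of two */
            return false;

        *sectors_per_block = val >> SECTOR_SHIFT;
        return true;
    }
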
3735 ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT; in dm_integrity_ctr()
3739 ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT; in dm_integrity_ctr()
3748 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT); in dm_integrity_ctr()
3839 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); in dm_integrity_ctr()
3854 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { in dm_integrity_ctr()
3936 …bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3… in dm_integrity_ctr()
3975 DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT); in dm_integrity_ctr()
3997 ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT); in dm_integrity_ctr()
4013 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL); in dm_integrity_ctr()
4059 sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT); in dm_integrity_ctr()
4060 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); in dm_integrity_ctr()
4061 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); in dm_integrity_ctr()
4166 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT); in dm_integrity_dtr()