/Linux-v4.19/block/ |
D | bounce.c |
     69  vto = kmap_atomic(to->bv_page);  in bounce_copy_vec()
     77  memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
    127  if (tovec.bv_page != fromvec.bv_page) {  in copy_to_high_bio_irq()
    133  vfrom = page_address(fromvec.bv_page) +  in copy_to_high_bio_irq()
    137  flush_dcache_page(tovec.bv_page);  in copy_to_high_bio_irq()
    155  if (bvec->bv_page != orig_vec.bv_page) {  in bounce_end_io()
    156  dec_zone_page_state(bvec->bv_page, NR_BOUNCE);  in bounce_end_io()
    157  mempool_free(bvec->bv_page, pool);  in bounce_end_io()
    280  if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)  in __blk_queue_bounce()
    296  struct page *page = to->bv_page;  in __blk_queue_bounce()
    [all …]
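
The bounce.c hits follow one pattern: when a bio segment sits in a page above the queue's bounce_pfn limit, the data is staged in a low-memory bounce page and copied back into the original page once the I/O completes. A minimal sketch of that per-segment copy-back, for kernel context (illustrative only, not the kernel's exact code; the function name is invented):

    #include <linux/bio.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy one bounced segment back into its original (possibly highmem) page. */
    static void copy_back_segment(const struct bio_vec *to, const struct bio_vec *from)
    {
            void *vto, *vfrom;

            vto = kmap_atomic(to->bv_page);          /* destination may be highmem */
            vfrom = page_address(from->bv_page) + from->bv_offset; /* bounce page is lowmem */
            memcpy(vto + to->bv_offset, vfrom, to->bv_len);
            kunmap_atomic(vto);

            flush_dcache_page(to->bv_page);          /* keep the D-cache coherent */
    }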
|
D | bio.c |
    544  flush_dcache_page(bv.bv_page);  in zero_fill_bio_iter()
    687  if (page == prev->bv_page &&  in bio_add_pc_page()
    710  bvec->bv_page = page;  in bio_add_pc_page()
    739  bvec->bv_page = NULL;  in bio_add_pc_page()
    771  if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {  in __bio_try_merge_page()
    799  bv->bv_page = page;  in __bio_add_page()
    865  bv[idx].bv_page = pages[idx];  in __bio_iov_iter_get_pages()
    969  src_p = kmap_atomic(src_bv.bv_page);  in bio_copy_data_iter()
    970  dst_p = kmap_atomic(dst_bv.bv_page);  in bio_copy_data_iter()
    979  flush_dcache_page(dst_bv.bv_page);  in bio_copy_data_iter()
    [all …]
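
The hits at bio.c lines 771 and 799 are the two halves of adding a page to a bio: first try to extend the last bio_vec when the new range continues it in the same page, otherwise start a fresh vector entry. A hedged, simplified sketch of that logic (the real __bio_try_merge_page()/__bio_add_page() also handle cloned bios and more bookkeeping; the function name here is invented):

    #include <linux/bio.h>

    /* Append (page, off, len) to a bio, merging with the last segment when possible. */
    static bool add_page_merge_or_append(struct bio *bio, struct page *page,
                                         unsigned int len, unsigned int off)
    {
            struct bio_vec *bv;

            if (bio->bi_vcnt) {
                    bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
                    /* same page and contiguous offset: just grow the last segment */
                    if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
                            bv->bv_len += len;
                            bio->bi_iter.bi_size += len;
                            return true;
                    }
            }

            if (bio->bi_vcnt >= bio->bi_max_vecs)
                    return false;                    /* no room for another segment */

            bv = &bio->bi_io_vec[bio->bi_vcnt++];
            bv->bv_page = page;
            bv->bv_offset = off;
            bv->bv_len = len;
            bio->bi_iter.bi_size += len;
            return true;
    }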
|
D | bio-integrity.c |
    111  kfree(page_address(bip->bip_vec->bv_page) +  in bio_integrity_free()
    153  iv->bv_page = page;  in bio_integrity_add_page()
    177  void *prot_buf = page_address(bip->bip_vec->bv_page) +  in bio_integrity_process()
    186  void *kaddr = kmap_atomic(bv.bv_page);  in bio_integrity_process()
|
D | blk-zoned.c |
    195  if (!bv->bv_page)  in blkdev_report_zones()
    198  addr = kmap_atomic(bv->bv_page);  in blkdev_report_zones()
    227  __free_page(bv->bv_page);  in blkdev_report_zones()
|
D | t10-pi.c |
    224  pmap = kmap_atomic(iv.bv_page);  in t10_pi_prepare()
    279  pmap = kmap_atomic(iv.bv_page);  in t10_pi_complete()
|
/Linux-v4.19/drivers/xen/ |
D | biomerge.c |
    11  unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));  in xen_biovec_phys_mergeable()
    12  unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));  in xen_biovec_phys_mergeable()
|
/Linux-v4.19/include/linux/ |
D | bvec.h |
    31  struct page *bv_page;  member
    56  (__bvec_iter_bvec((bvec), (iter))->bv_page)
    67  .bv_page = bvec_iter_page((bvec), (iter)), \
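
Line 31 is the definition itself: in v4.19 a bio_vec is just a (page, length, offset) triple, and bvec_iter_page() resolves an iterator position back to the underlying bv_page. Almost every driver and filesystem hit in this listing then follows the same access idiom: walk the bio segment by segment and temporarily map each segment's page. A generic sketch of that idiom (the function is hypothetical; zeroing stands in for whatever per-segment work a real caller does):

    #include <linux/bio.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Visit every segment of a bio through a temporary kernel mapping. */
    static void touch_each_segment(struct bio *bio)
    {
            struct bio_vec bvec;
            struct bvec_iter iter;

            bio_for_each_segment(bvec, bio, iter) {
                    void *kaddr = kmap_atomic(bvec.bv_page);

                    /* the segment's data is bvec.bv_len bytes at kaddr + bvec.bv_offset */
                    memset(kaddr + bvec.bv_offset, 0, bvec.bv_len);

                    kunmap_atomic(kaddr);
                    flush_dcache_page(bvec.bv_page);
            }
    }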
|
D | bio.h |
    138  #define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
    326  return bio_first_bvec_all(bio)->bv_page;  in bio_first_page_all()
    583  addr = (unsigned long) kmap_atomic(bvec->bv_page);  in bvec_kmap_irq()
    601  return page_address(bvec->bv_page) + bvec->bv_offset;  in bvec_kmap_irq()
|
/Linux-v4.19/drivers/md/bcache/ |
D | util.c |
    247  bv->bv_page = is_vmalloc_addr(base)  in bch_bio_map()
    274  bv->bv_page = alloc_page(gfp_mask);  in bch_bio_alloc_pages()
    275  if (!bv->bv_page) {  in bch_bio_alloc_pages()
    277  __free_page(bv->bv_page);  in bch_bio_alloc_pages()
|
D | debug.c |
    130  void *p1 = kmap_atomic(bv.bv_page);  in bch_data_verify()
    134  p2 = page_address(cbv.bv_page);  in bch_data_verify()
|
/Linux-v4.19/lib/ |
D | iov_iter.c |
     567  memcpy_to_page(v.bv_page, v.bv_offset,  in _copy_to_iter()
     668  rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,  in _copy_to_iter_mcsafe()
     703  memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,  in _copy_from_iter()
     729  memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,  in _copy_from_iter_full()
     749  memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,  in _copy_from_iter_nocache()
     783  memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,  in _copy_from_iter_flushcache()
     808  memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,  in _copy_from_iter_full_nocache()
     895  memzero_page(v.bv_page, v.bv_offset, v.bv_len),  in iov_iter_zero()
     918  memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,  in iov_iter_copy_from_user_atomic()
    1215  get_page(*pages = v.bv_page);  in iov_iter_get_pages()
    [all …]
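
For ITER_BVEC iterators the copy routines above all reduce to the same operation: map v.bv_page, copy v.bv_len bytes at v.bv_offset, unmap. The sketch below is a hedged restatement of roughly what the memcpy_from_page() helper used in these hits boils down to (in this kernel version it is a small helper local to lib/iov_iter.c), not the exact code:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy 'len' bytes out of 'page' starting at 'offset' into a kernel buffer. */
    static void copy_from_bvec_page(char *to, struct page *page,
                                    size_t offset, size_t len)
    {
            char *from = kmap_atomic(page);

            memcpy(to, from + offset, len);
            kunmap_atomic(from);
    }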
|
/Linux-v4.19/fs/9p/ |
D | vfs_addr.c |
     56  struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};  in v9fs_fid_readpage()
    175  bvec.bv_page = page;  in v9fs_vfs_writepage_locked()
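
The 9p hits show the other common role of bv_page: wrapping a single page in a one-element bio_vec so the page can be fed to the iov_iter machinery. A hedged sketch of that pattern as it looks in v4.19, where iov_iter_bvec() still takes the ITER_BVEC flag in its direction argument (vfs_iter_read() stands in here for whatever consumer actually receives the iterator):

    #include <linux/bvec.h>
    #include <linux/uio.h>
    #include <linux/fs.h>

    /* Read one page of data from 'filp' at 'pos' into 'page'. */
    static ssize_t read_page_via_bvec(struct file *filp, struct page *page, loff_t pos)
    {
            struct bio_vec bvec = {
                    .bv_page   = page,
                    .bv_offset = 0,
                    .bv_len    = PAGE_SIZE,
            };
            struct iov_iter to;

            iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);
            return vfs_iter_read(filp, &to, &pos, 0);
    }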
|
/Linux-v4.19/drivers/md/ |
D | dm-log-writes.c |
    193  if (block->vecs[i].bv_page)  in free_pending_block()
    194  __free_page(block->vecs[i].bv_page);  in free_pending_block()
    373  ret = bio_add_page(bio, block->vecs[i].bv_page,  in log_one_block()
    390  ret = bio_add_page(bio, block->vecs[i].bv_page,  in log_one_block()
    749  src = kmap_atomic(bv.bv_page);  in log_writes_map()
    754  block->vecs[i].bv_page = page;  in log_writes_map()
|
/Linux-v4.19/drivers/block/zram/ |
D | zram_drv.c |
     495  if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {  in read_from_bdev_async()
     589  if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,  in write_to_bdev()
     997  bvec.bv_page = page;  in __zram_bvec_read()
    1053  page = bvec->bv_page;  in zram_bvec_read()
    1066  void *dst = kmap_atomic(bvec->bv_page);  in zram_bvec_read()
    1089  struct page *page = bvec->bv_page;  in __zram_bvec_write()
    1233  src = kmap_atomic(bvec->bv_page);  in zram_bvec_write()
    1239  vec.bv_page = page;  in zram_bvec_write()
    1307  flush_dcache_page(bvec->bv_page);  in zram_bvec_rw()
    1430  bv.bv_page = page;  in zram_rw_page()
|
/Linux-v4.19/drivers/nvdimm/ |
D | blk.c |
    100  iobuf = kmap_atomic(bv.bv_page);  in nd_blk_rw_integrity()
    194  err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,  in nd_blk_make_request()
|
/Linux-v4.19/fs/btrfs/ |
D | compression.c |
     172  SetPageChecked(bvec->bv_page);  in end_compressed_bio_read()
     404  return page_offset(last->bv_page) + last->bv_len + last->bv_offset;  in bio_end_offset()
    1139  start_byte = page_offset(bvec.bv_page) - disk_start;  in btrfs_decompress_buf2page()
    1163  kaddr = kmap_atomic(bvec.bv_page);  in btrfs_decompress_buf2page()
    1166  flush_dcache_page(bvec.bv_page);  in btrfs_decompress_buf2page()
    1178  start_byte = page_offset(bvec.bv_page) - disk_start;  in btrfs_decompress_buf2page()
|
D | file-item.c |
    218  offset = page_offset(bvec.bv_page) + bvec.bv_offset;  in __btrfs_lookup_bio_sums()
    456  offset = page_offset(bvec.bv_page) + bvec.bv_offset;  in btrfs_csum_one_bio()
    463  data = kmap_atomic(bvec.bv_page);  in btrfs_csum_one_bio()
    493  data = kmap_atomic(bvec.bv_page);  in btrfs_csum_one_bio()
|
/Linux-v4.19/fs/crypto/ |
D | bio.c |
    35  struct page *page = bv->bv_page;  in __fscrypt_decrypt_bio()
|
/Linux-v4.19/drivers/block/ |
D | null_blk_zoned.c |
    61  addr = kmap_atomic(bvec.bv_page);  in null_zone_fill_bio()
|
D | loop.c |
    323  ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,  in lo_write_transfer()
    328  b.bv_page = page;  in lo_write_transfer()
    354  flush_dcache_page(bvec.bv_page);  in lo_read_simple()
    386  b.bv_page = page;  in lo_read_transfer()
    397  ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,  in lo_read_transfer()
    402  flush_dcache_page(bvec.bv_page);  in lo_read_transfer()
|
/Linux-v4.19/drivers/lightnvm/ |
D | pblk-read.c |
    283  src_p = kmap_atomic(src_bv.bv_page);  in pblk_end_partial_read()
    284  dst_p = kmap_atomic(dst_bv.bv_page);  in pblk_end_partial_read()
    293  mempool_free(src_bv.bv_page, &pblk->page_bio_pool);  in pblk_end_partial_read()
|
/Linux-v4.19/fs/xfs/ |
D | xfs_aops.c |
    68  struct iomap_page *iop = to_iomap_page(bvec->bv_page);  in xfs_finish_page_writeback()
    71  SetPageError(bvec->bv_page);  in xfs_finish_page_writeback()
    79  end_page_writeback(bvec->bv_page);  in xfs_finish_page_writeback()
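
The completion-side hits (xfs here, ext4's mpage_end_io and btrfs's end_compressed_bio_read elsewhere in this listing) share another idiom: in the bio's end_io handler, walk the finished bio with bio_for_each_segment_all() and update each bv_page's state. A generic, hedged sketch of a write-completion handler in the v4.19 style (three-argument bio_for_each_segment_all(); the function name is invented):

    #include <linux/bio.h>
    #include <linux/pagemap.h>

    /* End-of-write handler: flag errors, mark every page done, drop the bio. */
    static void example_write_end_io(struct bio *bio)
    {
            struct bio_vec *bvec;
            int i;

            bio_for_each_segment_all(bvec, bio, i) {
                    if (bio->bi_status)
                            SetPageError(bvec->bv_page);    /* propagate the I/O error */
                    end_page_writeback(bvec->bv_page);      /* wake writeback waiters */
            }
            bio_put(bio);
    }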
|
/Linux-v4.19/drivers/s390/block/ |
D | dasd_fba.c |
    474  if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))  in dasd_fba_build_cp_regular()
    512  dst = page_address(bv.bv_page) + bv.bv_offset;  in dasd_fba_build_cp_regular()
    594  dst = page_address(bv.bv_page) + bv.bv_offset;  in dasd_fba_free_cp()
|
/Linux-v4.19/drivers/target/ |
D | target_core_file.c |
    299  bvec[i].bv_page = sg_page(sg);  in fd_execute_rw_aio()
    349  bvec[i].bv_page = sg_page(sg);  in fd_do_rw()
    486  bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);  in fd_execute_write_same()
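
The target_core_file hits are the scatterlist-to-bio_vec bridge: each scatterlist entry's page, length and offset are copied into a bio_vec array so the buffer can be wrapped in an iov_iter and handed to vfs_iter_read()/vfs_iter_write(). A hedged sketch of that conversion loop (the helper name is invented):

    #include <linux/bvec.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Mirror a scatterlist into a freshly allocated bio_vec array. */
    static struct bio_vec *sgl_to_bvec(struct scatterlist *sgl, int nents)
    {
            struct bio_vec *bvec;
            struct scatterlist *sg;
            int i;

            bvec = kcalloc(nents, sizeof(*bvec), GFP_KERNEL);
            if (!bvec)
                    return NULL;

            for_each_sg(sgl, sg, nents, i) {
                    bvec[i].bv_page   = sg_page(sg);
                    bvec[i].bv_len    = sg->length;
                    bvec[i].bv_offset = sg->offset;
            }
            return bvec;
    }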
|
/Linux-v4.19/fs/ext4/ |
D | readpage.c |
    85  struct page *page = bv->bv_page;  in mpage_end_io()
|