/Linux-v5.4/include/linux/ |
D | bvec.h |
    45  #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])  argument
    48  #define mp_bvec_iter_page(bvec, iter) \  argument
    49  (__bvec_iter_bvec((bvec), (iter))->bv_page)
    51  #define mp_bvec_iter_len(bvec, iter) \  argument
    53  __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
    55  #define mp_bvec_iter_offset(bvec, iter) \  argument
    56  (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
    58  #define mp_bvec_iter_page_idx(bvec, iter) \  argument
    59  (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
    61  #define mp_bvec_iter_bvec(bvec, iter) \  argument
    [all …]
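
These mp_bvec_iter_* helpers locate the current position inside a (possibly multi-page) bvec: bi_idx picks the vector entry, bi_bvec_done counts the bytes already consumed within it, so the current offset is bv_offset + bi_bvec_done and the current page is that offset divided by PAGE_SIZE. A minimal user-space sketch of the same arithmetic, with simplified structs and a hypothetical advance() helper (not the kernel's actual code):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Simplified stand-ins: bv_page is just a number here, not a struct page *. */
    struct toy_bvec { unsigned bv_page, bv_offset, bv_len; };
    struct toy_iter { unsigned bi_idx, bi_bvec_done, bi_size; };

    /* Same arithmetic as mp_bvec_iter_offset() / mp_bvec_iter_page_idx(). */
    static unsigned cur_offset(const struct toy_bvec *bv, const struct toy_iter *it)
    {
        return bv[it->bi_idx].bv_offset + it->bi_bvec_done;
    }

    static unsigned cur_page_idx(const struct toy_bvec *bv, const struct toy_iter *it)
    {
        return cur_offset(bv, it) / PAGE_SIZE;   /* which page of the segment */
    }

    /* Hypothetical advance(): consume bytes, step to the next entry when done. */
    static void advance(const struct toy_bvec *bv, struct toy_iter *it, unsigned bytes)
    {
        it->bi_size -= bytes;
        it->bi_bvec_done += bytes;
        while (it->bi_bvec_done && it->bi_bvec_done >= bv[it->bi_idx].bv_len) {
            it->bi_bvec_done -= bv[it->bi_idx].bv_len;
            it->bi_idx++;
        }
    }

    int main(void)
    {
        /* one multi-page segment, starting 512 bytes into its first page */
        struct toy_bvec seg[] = { { 0, 512, 2 * PAGE_SIZE } };
        struct toy_iter it = { 0, 0, 2 * PAGE_SIZE };

        advance(seg, &it, PAGE_SIZE);   /* consume one page worth of data */
        printf("offset=%u page_idx=%u\n", cur_offset(seg, &it), cur_page_idx(seg, &it));
        return 0;
    }
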
|
D | bio.h |
    172  #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)  argument
    531  static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)  in bvec_kmap_irq() argument
    540  addr = (unsigned long) kmap_atomic(bvec->bv_page);  in bvec_kmap_irq()
    544  return (char *) addr + bvec->bv_offset;  in bvec_kmap_irq()
    556  static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)  in bvec_kmap_irq() argument
    558  return page_address(bvec->bv_page) + bvec->bv_offset;  in bvec_kmap_irq()
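
bio.h carries two definitions of bvec_kmap_irq(): the CONFIG_HIGHMEM variant at line 531 must kmap_atomic() the page with local interrupts disabled, while the non-HIGHMEM variant at line 556 reduces to page_address() plus bv_offset. A hedged illustration of how a driver might use the pair to wipe one segment; example_zero_bvec() is hypothetical, not code from the tree:

    #include <linux/bio.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /*
     * Zero the data covered by a single bio_vec.  bvec_kmap_irq() disables
     * local interrupts on HIGHMEM configs, so keep the mapped section short.
     */
    static void example_zero_bvec(struct bio_vec *bvec)
    {
        unsigned long flags;
        char *buf = bvec_kmap_irq(bvec, &flags);

        memset(buf, 0, bvec->bv_len);
        flush_dcache_page(bvec->bv_page);
        bvec_kunmap_irq(buf, &flags);
    }
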
|
/Linux-v5.4/drivers/block/zram/ |
D | zram_drv.c |
    55  static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
    145  static inline bool is_partial_io(struct bio_vec *bvec)  in is_partial_io() argument
    147  return bvec->bv_len != PAGE_SIZE;  in is_partial_io()
    150  static inline bool is_partial_io(struct bio_vec *bvec)  in is_partial_io() argument
    180  static void update_position(u32 *index, int *offset, struct bio_vec *bvec)  in update_position() argument
    182  *index += (*offset + bvec->bv_len) / PAGE_SIZE;  in update_position()
    183  *offset = (*offset + bvec->bv_len) % PAGE_SIZE;  in update_position()
    589  static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,  in read_from_bdev_async() argument
    600  if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {  in read_from_bdev_async()
    658  struct bio_vec bvec;  in writeback_store() local
    [all …]
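
zram addresses its store in PAGE_SIZE blocks, so a bvec shorter than a page (is_partial_io()) forces a read-modify-write, and update_position() carries the (index, offset) cursor forward past the bytes just transferred. The same modular arithmetic as a stand-alone sketch:

    #include <assert.h>

    #define PAGE_SIZE 4096u

    /*
     * Mirror of zram's update_position(): advance a (block index, byte offset)
     * cursor by 'len' bytes, where each block is PAGE_SIZE bytes long.
     */
    static void update_position(unsigned *index, unsigned *offset, unsigned len)
    {
        *index  += (*offset + len) / PAGE_SIZE;
        *offset  = (*offset + len) % PAGE_SIZE;
    }

    int main(void)
    {
        unsigned index = 3, offset = 3072;

        update_position(&index, &offset, 2048);   /* crosses into the next block */
        assert(index == 4 && offset == 1024);
        return 0;
    }
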
|
/Linux-v5.4/block/ |
D | bio.c |
    685  struct bio_vec *bvec;  in __bio_add_pc_page() local
    704  bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];  in __bio_add_pc_page()
    705  if (bvec_gap_to_prev(q, bvec, offset))  in __bio_add_pc_page()
    715  bvec = &bio->bi_io_vec[bio->bi_vcnt];  in __bio_add_pc_page()
    716  bvec->bv_page = page;  in __bio_add_pc_page()
    717  bvec->bv_len = len;  in __bio_add_pc_page()
    718  bvec->bv_offset = offset;  in __bio_add_pc_page()
    824  struct bio_vec *bvec;  in bio_release_pages() local
    829  bio_for_each_segment_all(bvec, bio, iter_all) {  in bio_release_pages()
    830  if (mark_dirty && !PageCompound(bvec->bv_page))  in bio_release_pages()
    [all …]
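
__bio_add_pc_page() fills the next bi_io_vec slot with a (page, len, offset) triple, and bio_release_pages() later walks every resulting segment with bio_for_each_segment_all() to drop the page references. A hedged sketch of the usual caller-side pattern; my_read_page() and my_endio() are hypothetical and assume a valid block_device and a referenced page:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/mm.h>

    /* Completion: walk the segments that were added and release the pages. */
    static void my_endio(struct bio *bio)
    {
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all)
            put_page(bvec->bv_page);
        bio_put(bio);
    }

    /* Read PAGE_SIZE bytes at 'sector' into 'page' (the page reference is consumed). */
    static int my_read_page(struct block_device *bdev, struct page *page, sector_t sector)
    {
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);

        if (!bio)
            return -ENOMEM;
        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_READ;
        bio->bi_end_io = my_endio;

        if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
            bio_put(bio);
            return -EIO;
        }
        submit_bio(bio);
        return 0;
    }
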
|
D | blk-merge.c |
    399  struct bio_vec *bvec, struct scatterlist *sglist,  in blk_bvec_map_sg() argument
    402  unsigned nbytes = bvec->bv_len;  in blk_bvec_map_sg()
    406  unsigned offset = bvec->bv_offset + total;  in blk_bvec_map_sg()
    408  struct page *page = bvec->bv_page;  in blk_bvec_map_sg()
    442  __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,  in __blk_segment_map_sg_merge() argument
    446  int nbytes = bvec->bv_len;  in __blk_segment_map_sg_merge()
    454  if (!biovec_phys_mergeable(q, bvprv, bvec))  in __blk_segment_map_sg_merge()
    466  struct bio_vec uninitialized_var(bvec), bvprv = { NULL };  in __blk_bios_map_sg()
    472  bio_for_each_bvec(bvec, bio, iter) {  in __blk_bios_map_sg()
    479  __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))  in __blk_bios_map_sg()
    [all …]
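
blk_bvec_map_sg() must break a multi-page bvec into scatterlist entries that never cross a boundary the hardware cannot handle, stepping a running 'total' through bv_len and recomputing the page and in-page offset each round (the real code also honours the queue's segment-size limits). The chunking loop in isolation, as a stand-alone sketch that only respects PAGE_SIZE boundaries, with a hypothetical emit() callback:

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Stand-in for "append one scatterlist entry". */
    static void emit(unsigned page_idx, unsigned off, unsigned len)
    {
        printf("page %u: off=%u len=%u\n", page_idx, off, len);
    }

    /*
     * Split one (offset, len) extent into pieces that never cross a page
     * boundary, the way blk_bvec_map_sg() walks a multi-page bvec.
     */
    static unsigned map_extent(unsigned bv_offset, unsigned bv_len)
    {
        unsigned total = 0, nsegs = 0;

        while (total < bv_len) {
            unsigned offset  = bv_offset + total;          /* absolute byte offset  */
            unsigned in_page = offset % PAGE_SIZE;         /* offset inside the page */
            unsigned len     = MIN(bv_len - total, PAGE_SIZE - in_page);

            emit(offset / PAGE_SIZE, in_page, len);
            total += len;
            nsegs++;
        }
        return nsegs;
    }

    int main(void)
    {
        /* splits into 4 page-bounded pieces: 1096, 4096, 4096, 712 bytes */
        return map_extent(3000, 10000) == 4 ? 0 : 1;
    }
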
|
D | bounce.c |
    165  struct bio_vec *bvec, orig_vec;  in bounce_end_io() local
    172  bio_for_each_segment_all(bvec, bio, iter_all) {  in bounce_end_io()
    174  if (bvec->bv_page != orig_vec.bv_page) {  in bounce_end_io()
    175  dec_zone_page_state(bvec->bv_page, NR_BOUNCE);  in bounce_end_io()
    176  mempool_free(bvec->bv_page, pool);  in bounce_end_io()
|
/Linux-v5.4/drivers/target/ |
D | target_core_file.c |
    271  struct bio_vec *bvec;  in fd_execute_rw_aio() local
    279  bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);  in fd_execute_rw_aio()
    280  if (!bvec) {  in fd_execute_rw_aio()
    286  bvec[i].bv_page = sg_page(sg);  in fd_execute_rw_aio()
    287  bvec[i].bv_len = sg->length;  in fd_execute_rw_aio()
    288  bvec[i].bv_offset = sg->offset;  in fd_execute_rw_aio()
    293  iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);  in fd_execute_rw_aio()
    310  kfree(bvec);  in fd_execute_rw_aio()
    324  struct bio_vec *bvec;  in fd_do_rw() local
    329  bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);  in fd_do_rw()
    [all …]
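
fd_execute_rw_aio() converts the command's scatter-gather list into a heap-allocated bio_vec array so the whole buffer can be handed to the VFS as a single iov_iter. A hedged sketch of that conversion for the read direction; sgl_read_file() is a hypothetical helper and error handling is trimmed to the essentials:

    #include <linux/bvec.h>
    #include <linux/fs.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    /* Read 'len' bytes at *pos from 'file' into the pages described by an sgl. */
    static ssize_t sgl_read_file(struct file *file, struct scatterlist *sgl,
                                 unsigned int nents, size_t len, loff_t *pos)
    {
        struct scatterlist *sg;
        struct bio_vec *bvec;
        struct iov_iter iter;
        unsigned int i;
        ssize_t ret;

        bvec = kcalloc(nents, sizeof(*bvec), GFP_KERNEL);
        if (!bvec)
            return -ENOMEM;

        /* one bio_vec per scatterlist entry: same page, offset, length */
        for_each_sg(sgl, sg, nents, i) {
            bvec[i].bv_page   = sg_page(sg);
            bvec[i].bv_len    = sg->length;
            bvec[i].bv_offset = sg->offset;
        }

        iov_iter_bvec(&iter, READ, bvec, nents, len);
        ret = vfs_iter_read(file, &iter, pos, 0);

        kfree(bvec);
        return ret;
    }
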
|
/Linux-v5.4/drivers/nvme/target/ |
D | io-cmd-file.c |
    108  iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);  in nvmet_file_submit_bvec()
    122  if (req->f.bvec != req->inline_bvec) {  in nvmet_file_io_done()
    124  kfree(req->f.bvec);  in nvmet_file_io_done()
    126  mempool_free(req->f.bvec, req->ns->bvec_pool);  in nvmet_file_io_done()
    156  nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);  in nvmet_file_execute_io()
    157  len += req->f.bvec[bv_cnt].bv_len;  in nvmet_file_execute_io()
    158  total_len += req->f.bvec[bv_cnt].bv_len;  in nvmet_file_execute_io()
    241  req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),  in nvmet_file_execute_rw()
    244  req->f.bvec = req->inline_bvec;  in nvmet_file_execute_rw()
    246  if (unlikely(!req->f.bvec)) {  in nvmet_file_execute_rw()
    [all …]
|
/Linux-v5.4/fs/9p/ |
D | vfs_addr.c |
    42  struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};  in v9fs_fid_readpage() local
    54  iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE);  in v9fs_fid_readpage()
    154  struct bio_vec bvec;  in v9fs_vfs_writepage_locked() local
    162  bvec.bv_page = page;  in v9fs_vfs_writepage_locked()
    163  bvec.bv_offset = 0;  in v9fs_vfs_writepage_locked()
    164  bvec.bv_len = len;  in v9fs_vfs_writepage_locked()
    165  iov_iter_bvec(&from, WRITE, &bvec, 1, len);  in v9fs_vfs_writepage_locked()
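
v9fs_fid_readpage() is the smallest possible use of the interface: one on-stack bio_vec wrapping one page, turned into an iov_iter so the 9p transport can fill the page without a bounce buffer. A hedged sketch of the producer side of that pattern; example_fill_page() is hypothetical and simply copies from a kernel buffer through the iterator:

    #include <linux/bvec.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/uio.h>

    /*
     * Fill 'page' with 'len' bytes from 'src' by going through a one-entry
     * bvec iov_iter, the way a network filesystem's transport would fill it.
     */
    static int example_fill_page(struct page *page, const void *src, size_t len)
    {
        struct bio_vec bvec = {
            .bv_page   = page,
            .bv_offset = 0,
            .bv_len    = PAGE_SIZE,
        };
        struct iov_iter to;

        iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE);   /* READ: iter is the destination */
        if (copy_to_iter(src, len, &to) != len)
            return -EFAULT;
        flush_dcache_page(page);
        return 0;
    }
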
|
/Linux-v5.4/drivers/block/ |
D | loop.c |
    267  static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)  in lo_write_bvec() argument
    272  iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);  in lo_write_bvec()
    278  if (likely(bw == bvec->bv_len))  in lo_write_bvec()
    283  (unsigned long long)*ppos, bvec->bv_len);  in lo_write_bvec()
    292  struct bio_vec bvec;  in lo_write_simple() local
    296  rq_for_each_segment(bvec, rq, iter) {  in lo_write_simple()
    297  ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);  in lo_write_simple()
    314  struct bio_vec bvec, b;  in lo_write_transfer() local
    323  rq_for_each_segment(bvec, rq, iter) {  in lo_write_transfer()
    324  ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,  in lo_write_transfer()
    [all …]
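
loop's simple write path is a two-level pattern: rq_for_each_segment() walks every bio_vec of the request, each segment is wrapped in a one-entry iov_iter, and vfs_iter_write() pushes it to the backing file while advancing the file position by the bytes actually accepted. Condensed into one hedged sketch; write_rq_to_file() is hypothetical and omits loop's transfer/encryption path:

    #include <linux/blkdev.h>
    #include <linux/bvec.h>
    #include <linux/fs.h>
    #include <linux/sched.h>
    #include <linux/uio.h>

    /* Write every segment of 'rq' to 'file', starting at byte offset 'pos'. */
    static int write_rq_to_file(struct file *file, struct request *rq, loff_t pos)
    {
        struct req_iterator iter;
        struct bio_vec bvec;

        rq_for_each_segment(bvec, rq, iter) {
            struct iov_iter i;
            ssize_t bw;

            iov_iter_bvec(&i, WRITE, &bvec, 1, bvec.bv_len);
            bw = vfs_iter_write(file, &i, &pos, 0);   /* advances pos on success */
            if (bw != bvec.bv_len)
                return bw < 0 ? (int)bw : -EIO;
            cond_resched();
        }
        return 0;
    }
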
|
D | brd.c |
    288  struct bio_vec bvec;  in brd_make_request() local
    296  bio_for_each_segment(bvec, bio, iter) {  in brd_make_request()
    297  unsigned int len = bvec.bv_len;  in brd_make_request()
    300  err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,  in brd_make_request()
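
brd_make_request() is the canonical single-queue driver loop: declare a struct bio_vec and a struct bvec_iter, let bio_for_each_segment() hand back one segment per iteration, and pass (page, len, offset, sector) to the device-specific copy routine. A hedged skeleton of such a ->make_request_fn; mydev_make_request() and mydev_do_bvec() are hypothetical:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/blk_types.h>

    /* Hypothetical per-segment worker: move bv_len bytes to/from the device. */
    static int mydev_do_bvec(struct page *page, unsigned int len,
                             unsigned int offset, bool is_write, sector_t sector)
    {
        /* device-specific data movement would go here */
        return 0;
    }

    static blk_qc_t mydev_make_request(struct request_queue *q, struct bio *bio)
    {
        struct bio_vec bvec;
        struct bvec_iter iter;
        sector_t sector = bio->bi_iter.bi_sector;

        bio_for_each_segment(bvec, bio, iter) {
            int err = mydev_do_bvec(bvec.bv_page, bvec.bv_len, bvec.bv_offset,
                                    op_is_write(bio_op(bio)), sector);
            if (err) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
            }
            sector += bvec.bv_len >> SECTOR_SHIFT;
        }
        bio_endio(bio);
        return BLK_QC_T_NONE;
    }
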
|
/Linux-v5.4/arch/m68k/emu/ |
D | nfblock.c |
    65  struct bio_vec bvec;  in nfhd_make_request() local
    72  bio_for_each_segment(bvec, bio, iter) {  in nfhd_make_request()
    73  len = bvec.bv_len;  in nfhd_make_request()
    76  page_to_phys(bvec.bv_page) + bvec.bv_offset);  in nfhd_make_request()
|
/Linux-v5.4/drivers/s390/block/ |
D | xpram.c |
    188  struct bio_vec bvec;  in xpram_make_request() local
    206  bio_for_each_segment(bvec, bio, iter) {  in xpram_make_request()
    208  kmap(bvec.bv_page) + bvec.bv_offset;  in xpram_make_request()
    209  bytes = bvec.bv_len;  in xpram_make_request()
|
D | dcssblk.c |
    858  struct bio_vec bvec;  in dcssblk_make_request() local
    895  bio_for_each_segment(bvec, bio, iter) {  in dcssblk_make_request()
    897  page_address(bvec.bv_page) + bvec.bv_offset;  in dcssblk_make_request()
    899  if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)  in dcssblk_make_request()
    904  bvec.bv_len);  in dcssblk_make_request()
    907  bvec.bv_len);  in dcssblk_make_request()
    909  bytes_done += bvec.bv_len;  in dcssblk_make_request()
|
/Linux-v5.4/Documentation/block/ |
D | biovecs.rst |
    20  bytes completed in the current bvec.
    50  exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
    58  coding bvec iterators before, and having common implementation considerably
    63  it somewhere else if there was an error) had to save the entire bvec array
    66  * Biovecs can be shared between multiple bios - a bvec iter can represent an
    74  bios with more than a single bvec! Now, we can efficiently split arbitrary
    86  fine to _most_ devices, but since accessing the raw bvec array was the
    88  since all drivers _must_ go through the bvec iterator - and have been
    142  * The following helpers iterate over multi-page bvec. The passed 'struct
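
The excerpts above are about immutability: drivers never edit bi_io_vec in place, they only move a bvec_iter forward, which is what makes cheap splitting, partial completion and retry possible - remembering where you were is a small struct copy rather than a copy of the whole vector. A hedged illustration of that idea; example_resubmit_partial() is hypothetical, while bio_advance() is the real primitive that moves bi_iter:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /*
     * Acknowledge the first 'done' bytes of 'bio' and resubmit the rest.
     * The segment array is never touched: all progress lives in bi_iter,
     * so "the remainder" is just the same bio with its iterator advanced.
     */
    static void example_resubmit_partial(struct bio *bio, unsigned int done)
    {
        bio_advance(bio, done);                 /* moves bi_iter, never bi_io_vec */

        if (!bio->bi_iter.bi_size)
            bio_endio(bio);                     /* nothing left to do */
        else
            generic_make_request(bio);          /* re-queue from the new position */
    }
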
|
/Linux-v5.4/drivers/nvdimm/ |
D | blk.c |
    171  struct bio_vec bvec;  in nd_blk_make_request() local
    182  bio_for_each_segment(bvec, bio, iter) {  in nd_blk_make_request()
    183  unsigned int len = bvec.bv_len;  in nd_blk_make_request()
    186  err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,  in nd_blk_make_request()
    187  bvec.bv_offset, rw, iter.bi_sector);  in nd_blk_make_request()
|
/Linux-v5.4/arch/xtensa/platforms/iss/ |
D | simdisk.c |
    107  struct bio_vec bvec;  in simdisk_make_request() local
    111  bio_for_each_segment(bvec, bio, iter) {  in simdisk_make_request()
    112  char *buffer = kmap_atomic(bvec.bv_page) + bvec.bv_offset;  in simdisk_make_request()
    113  unsigned len = bvec.bv_len >> SECTOR_SHIFT;  in simdisk_make_request()
|
/Linux-v5.4/fs/btrfs/ |
D | file-item.c |
    155  struct bio_vec bvec;  in __btrfs_lookup_bio_sums() local
    211  bio_for_each_segment(bvec, bio, iter) {  in __btrfs_lookup_bio_sums()
    212  page_bytes_left = bvec.bv_len;  in __btrfs_lookup_bio_sums()
    217  offset = page_offset(bvec.bv_page) + bvec.bv_offset;  in __btrfs_lookup_bio_sums()
    441  struct bio_vec bvec;  in btrfs_csum_one_bio() local
    472  bio_for_each_segment(bvec, bio, iter) {  in btrfs_csum_one_bio()
    474  offset = page_offset(bvec.bv_page) + bvec.bv_offset;  in btrfs_csum_one_bio()
    482  bvec.bv_len + fs_info->sectorsize  in btrfs_csum_one_bio()
    512  data = kmap_atomic(bvec.bv_page);  in btrfs_csum_one_bio()
    513  crypto_shash_update(shash, data + bvec.bv_offset  in btrfs_csum_one_bio()
|
D | compression.c |
    186  struct bio_vec *bvec;  in end_compressed_bio_read() local
    194  bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)  in end_compressed_bio_read()
    195  SetPageChecked(bvec->bv_page);  in end_compressed_bio_read()
    1134  struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);  in btrfs_decompress_buf2page() local
    1140  start_byte = page_offset(bvec.bv_page) - disk_start;  in btrfs_decompress_buf2page()
    1160  bytes = min_t(unsigned long, bvec.bv_len,  in btrfs_decompress_buf2page()
    1164  kaddr = kmap_atomic(bvec.bv_page);  in btrfs_decompress_buf2page()
    1165  memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);  in btrfs_decompress_buf2page()
    1167  flush_dcache_page(bvec.bv_page);  in btrfs_decompress_buf2page()
    1177  bvec = bio_iter_iovec(bio, bio->bi_iter);  in btrfs_decompress_buf2page()
    [all …]
|
/Linux-v5.4/fs/gfs2/ |
D | lops.c |
    171  struct bio_vec *bvec,  in gfs2_end_log_write_bh() argument
    175  struct page *page = bvec->bv_page;  in gfs2_end_log_write_bh()
    179  size = bvec->bv_len;  in gfs2_end_log_write_bh()
    180  while (bh_offset(bh) < bvec->bv_offset)  in gfs2_end_log_write_bh()
    206  struct bio_vec *bvec;  in gfs2_end_log_write() local
    216  bio_for_each_segment_all(bvec, bio, iter_all) {  in gfs2_end_log_write()
    217  page = bvec->bv_page;  in gfs2_end_log_write()
    219  gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);  in gfs2_end_log_write()
    387  struct bio_vec *bvec;  in gfs2_end_log_read() local
    390  bio_for_each_segment_all(bvec, bio, iter_all) {  in gfs2_end_log_read()
    [all …]
|
D | meta_io.c |
    188  struct bio_vec *bvec;  in gfs2_meta_read_endio() local
    191  bio_for_each_segment_all(bvec, bio, iter_all) {  in gfs2_meta_read_endio()
    192  struct page *page = bvec->bv_page;  in gfs2_meta_read_endio()
    194  unsigned int len = bvec->bv_len;  in gfs2_meta_read_endio()
    196  while (bh_offset(bh) < bvec->bv_offset)  in gfs2_meta_read_endio()
|
/Linux-v5.4/fs/ext4/ |
D | page-io.c |
    64  struct bio_vec *bvec;  in ext4_finish_bio() local
    67  bio_for_each_segment_all(bvec, bio, iter_all) {  in ext4_finish_bio()
    68  struct page *page = bvec->bv_page;  in ext4_finish_bio()
    71  unsigned bio_start = bvec->bv_offset;  in ext4_finish_bio()
    72  unsigned bio_end = bio_start + bvec->bv_len;  in ext4_finish_bio()
|
/Linux-v5.4/lib/ |
D | iov_iter.c |
    70  for_each_bvec(__v, i->bvec, __bi, __start) { \
    103  const struct bio_vec *bvec = i->bvec; \
    107  i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
    108  i->nr_segs -= i->bvec - bvec; \
    1086  const struct bio_vec *bvec = i->bvec;  in iov_iter_revert() local
    1088  size_t n = (--bvec)->bv_len;  in iov_iter_revert()
    1091  i->bvec = bvec;  in iov_iter_revert()
    1125  return min(i->count, i->bvec->bv_len - i->iov_offset);  in iov_iter_single_seg_count()
    1145  const struct bio_vec *bvec, unsigned long nr_segs,  in iov_iter_bvec() argument
    1150  i->bvec = bvec;  in iov_iter_bvec()
    [all …]
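
For the bvec flavour of an iov_iter, iov_iter.c keeps (bvec, nr_segs, iov_offset, count): advancing re-points i->bvec at the segment where iteration stopped and shrinks nr_segs, while iov_iter_revert() walks the array backwards until the bytes being given back fit inside one segment again. The revert arithmetic on its own, as a stand-alone sketch with a simplified iterator (the caller never reverts more than it has consumed):

    #include <assert.h>
    #include <stddef.h>

    struct toy_seg  { unsigned len; };
    struct toy_iter {
        const struct toy_seg *seg;      /* current segment              */
        unsigned long nr_segs;          /* segments from 'seg' onward   */
        size_t iov_offset;              /* bytes consumed inside *seg   */
        size_t count;                   /* bytes still to transfer      */
    };

    /* Mirrors the bvec branch of iov_iter_revert(): give back 'unroll' bytes. */
    static void toy_revert(struct toy_iter *i, size_t unroll)
    {
        i->count += unroll;
        if (unroll <= i->iov_offset) {          /* stays inside the current segment */
            i->iov_offset -= unroll;
            return;
        }
        unroll -= i->iov_offset;
        for (;;) {                              /* step back whole segments */
            size_t n = (--i->seg)->len;
            i->nr_segs++;
            if (unroll <= n) {
                i->iov_offset = n - unroll;
                break;
            }
            unroll -= n;
        }
    }

    int main(void)
    {
        static const struct toy_seg segs[] = { {1024}, {2048}, {4096} };
        /* pretend we consumed all of segs[0..1] plus 100 bytes of segs[2] */
        struct toy_iter it = { &segs[2], 1, 100, 4096 - 100 };

        toy_revert(&it, 1000);                  /* rewinds back into segs[1] */
        assert(it.seg == &segs[1] && it.nr_segs == 2 && it.iov_offset == 2048 - 900);
        return 0;
    }
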
|
/Linux-v5.4/drivers/md/ |
D | dm-io.c |
    211  struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,  in bio_get_page() local
    214  *p = bvec.bv_page;  in bio_get_page()
    215  *len = bvec.bv_len;  in bio_get_page()
    216  *offset = bvec.bv_offset;  in bio_get_page()
    219  dp->context_bi.bi_sector = (sector_t)bvec.bv_len;  in bio_get_page()
|
/Linux-v5.4/fs/iomap/ |
D | buffered-io.c |
    151  iomap_read_page_end_io(struct bio_vec *bvec, int error)  in iomap_read_page_end_io() argument
    153  struct page *page = bvec->bv_page;  in iomap_read_page_end_io()
    160  iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);  in iomap_read_page_end_io()
    170  struct bio_vec *bvec;  in iomap_read_end_io() local
    173  bio_for_each_segment_all(bvec, bio, iter_all)  in iomap_read_end_io()
    174  iomap_read_page_end_io(bvec, error);  in iomap_read_end_io()
    532  struct bio_vec bvec;  in iomap_read_page_sync() local
    541  bio_init(&bio, &bvec, 1);  in iomap_read_page_sync()
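
iomap_read_page_sync() shows the fully synchronous variant: the bio and its single bio_vec both live on the stack, so bio_init() is enough (no bio_alloc()/bio_put() round trip) and submit_bio_wait() blocks until the read completes. A hedged sketch of the same on-stack pattern; read_page_sync() is a hypothetical helper:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Synchronously read 'len' bytes at 'sector' into 'page' + 'offset'. */
    static int read_page_sync(struct block_device *bdev, sector_t sector,
                              struct page *page, unsigned int offset, unsigned int len)
    {
        struct bio_vec bvec;
        struct bio bio;

        bio_init(&bio, &bvec, 1);               /* bio and its one segment on the stack */
        bio_set_dev(&bio, bdev);
        bio.bi_opf = REQ_OP_READ;
        bio.bi_iter.bi_sector = sector;

        if (bio_add_page(&bio, page, len, offset) != len)
            return -EIO;

        return submit_bio_wait(&bio);           /* blocks until the I/O completes */
    }
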
|