
Searched refs:bv_len (Results 1 – 25 of 98) sorted by relevance


/Linux-v5.15/include/linux/
bvec.h:34 unsigned int bv_len; member
67 __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
78 .bv_len = mp_bvec_iter_len((bvec), (iter)), \
97 .bv_len = bvec_iter_len((bvec), (iter)), \
115 while (bytes && bytes >= bv[idx].bv_len) { in bvec_iter_advance()
116 bytes -= bv[idx].bv_len; in bvec_iter_advance()
134 if (done == bv[iter->bi_idx].bv_len) { in bvec_iter_advance_single()
146 bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
177 bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, in bvec_advance()
178 bvec->bv_len - iter_all->done); in bvec_advance()
[all …]
bio.h:92 return bio_iovec(bio).bv_len; in bio_cur_bytes()
170 bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
179 bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
193 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
279 if (bv->bv_len == bio->bi_iter.bi_size) in bio_get_last_bvec()
296 bv->bv_len = iter.bi_bvec_done; in bio_get_last_bvec()
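
The two headers above define the bio_vec segment descriptor (bv_page, bv_len, bv_offset) and the iterator macros that advance by bv_len. A minimal sketch of the usual consumption pattern, assuming a kernel-module context (count_bio_bytes is a hypothetical helper, not part of the kernel API):

#include <linux/bio.h>

/* Walk a bio segment by segment; bio_for_each_segment() advances the
 * iterator by bvl.bv_len via bio_advance_iter_single(), as shown in
 * bio.h above. */
static unsigned int count_bio_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int total = 0;

	bio_for_each_segment(bv, bio, iter)
		total += bv.bv_len;

	return total;
}
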
/Linux-v5.15/drivers/block/
n64cart.c:69 (bv->bv_len & (MIN_ALIGNMENT - 1))); in n64cart_do_bvec()
79 n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1); in n64cart_do_bvec()
83 dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE); in n64cart_do_bvec()
97 pos += bvec.bv_len; in n64cart_submit_bio()
loop.c:325 iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len); in lo_write_bvec()
331 if (likely(bw == bvec->bv_len)) in lo_write_bvec()
336 (unsigned long long)*ppos, bvec->bv_len); in lo_write_bvec()
378 bvec.bv_offset, bvec.bv_len, pos >> 9); in lo_write_transfer()
384 b.bv_len = bvec.bv_len; in lo_write_transfer()
403 iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len); in lo_read_simple()
410 if (len != bvec.bv_len) { in lo_read_simple()
442 b.bv_len = bvec.bv_len; in lo_read_transfer()
444 iov_iter_bvec(&i, READ, &b, 1, b.bv_len); in lo_read_transfer()
458 if (len != bvec.bv_len) { in lo_read_transfer()
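
loop.c above wraps a single bio_vec in an iov_iter sized by bv_len before handing it to the backing file. A hedged sketch of that pattern (write_one_bvec is an illustrative name; the real lo_write_bvec() also brackets the write with file_start_write()/file_end_write() and updates statistics):

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/bvec.h>

/* Sketch modeled on loop.c's lo_write_bvec(): build a one-segment
 * iov_iter whose length is the segment's bv_len, write it to the
 * backing file, and treat a short write as an error. */
static int write_one_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
	struct iov_iter i;
	ssize_t bw;

	iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
	bw = vfs_iter_write(file, &i, ppos, 0);

	if (likely(bw == bvec->bv_len))
		return 0;
	return bw < 0 ? bw : -EIO;
}
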
/Linux-v5.15/drivers/md/
dm-ebs-target.c:69 unsigned int bv_len = bv->bv_len; in __ebs_rw_bvec() local
74 if (unlikely(!bv->bv_page || !bv_len)) in __ebs_rw_bvec()
80 while (bv_len) { in __ebs_rw_bvec()
81 cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len); in __ebs_rw_bvec()
84 if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio)) in __ebs_rw_bvec()
111 bv_len -= cur_len; in __ebs_rw_bvec()
dm-log-writes.c:384 block->vecs[i].bv_len, 0); in log_one_block()
385 if (ret != block->vecs[i].bv_len) { in log_one_block()
402 block->vecs[i].bv_len, 0); in log_one_block()
403 if (ret != block->vecs[i].bv_len) { in log_one_block()
409 sector += block->vecs[i].bv_len >> SECTOR_SHIFT; in log_one_block()
770 memcpy(dst, src + bv.bv_offset, bv.bv_len); in log_writes_map()
774 block->vecs[i].bv_len = bv.bv_len; in log_writes_map()
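
dm-log-writes copies each segment's payload using bv_offset and bv_len before logging it, as the log_writes_map() fragment above shows. A minimal sketch of that copy, assuming the destination buffer is large enough (kmap_local_page() is used here for illustration; the 5.15 code may map the page differently):

#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Copy one segment's payload out of a bio_vec: the data starts
 * bv_offset bytes into bv_page and is bv_len bytes long. */
static void copy_bvec_payload(void *dst, struct bio_vec *bv)
{
	void *src = kmap_local_page(bv->bv_page);

	memcpy(dst, src + bv->bv_offset, bv->bv_len);
	kunmap_local(src);
}
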
/Linux-v5.15/block/
blk-integrity.c:40 if (seg_size + iv.bv_len > queue_max_segment_size(q)) in blk_rq_count_integrity_sg()
43 seg_size += iv.bv_len; in blk_rq_count_integrity_sg()
47 seg_size = iv.bv_len; in blk_rq_count_integrity_sg()
82 if (sg->length + iv.bv_len > queue_max_segment_size(q)) in blk_rq_map_integrity_sg()
85 sg->length += iv.bv_len; in blk_rq_map_integrity_sg()
95 sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset); in blk_rq_map_integrity_sg()
blk-merge.c:203 unsigned len = min(bv->bv_len, max_len); in bvec_split_segs()
223 return len > 0 || bv->bv_len > max_len; in bvec_split_segs()
265 sectors + (bv.bv_len >> 9) <= max_sectors && in blk_bio_segment_split()
266 bv.bv_offset + bv.bv_len <= PAGE_SIZE) { in blk_bio_segment_split()
268 sectors += bv.bv_len >> 9; in blk_bio_segment_split()
334 ((*bio)->bi_io_vec[0].bv_len + in __blk_queue_split()
428 unsigned nbytes = bvec->bv_len; in blk_bvec_map_sg()
463 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset); in __blk_bvec_map_sg()
473 int nbytes = bvec->bv_len; in __blk_segment_map_sg_merge()
509 if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE) in __blk_bios_map_sg()
blk-map.c:56 bvec->bv_len, in bio_copy_from_iter()
62 if (ret < bvec->bv_len) in bio_copy_from_iter()
87 bvec->bv_len, in bio_copy_to_iter()
93 if (ret < bvec->bv_len) in bio_copy_to_iter()
317 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
404 p += bvec->bv_len; in bio_copy_kern_endio_read()
bounce.c:97 bio_advance_iter(from, &from_iter, tovec.bv_len); in copy_to_high_bio_irq()
117 bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len); in bounce_end_io()
221 sectors += from.bv_len >> 9; in __blk_queue_bounce()
bio.c:563 if (done + bv.bv_len > new_size) { in bio_truncate()
570 zero_user(bv.bv_page, offset, bv.bv_len - offset); in bio_truncate()
573 done += bv.bv_len; in bio_truncate()
779 size_t bv_end = bv->bv_offset + bv->bv_len; in page_is_mergeable()
810 if (bv->bv_len + len > queue_max_segment_size(q)) in bio_try_merge_hw_seg()
861 bvec->bv_len = len; in bio_add_hw_page()
955 bv->bv_len += len; in __bio_try_merge_page()
984 bv->bv_len = len; in __bio_add_page()
1290 unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); in bio_copy_data_iter()
blk.h:69 if (addr1 + vec1->bv_len != addr2) in biovec_phys_mergeable()
73 if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask)) in biovec_phys_mergeable()
82 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); in __bvec_gap_to_prev()
t10-pi.c:154 for (j = 0; j < iv.bv_len; j += tuple_sz) { in t10_pi_type1_prepare()
200 for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) { in t10_pi_type1_complete()
blk-crypto-fallback.c:221 num_sectors += bv.bv_len >> SECTOR_SHIFT; in blk_crypto_split_bio_if_needed()
336 for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) { in blk_crypto_fallback_encrypt_bio()
423 for (i = 0; i < bv.bv_len; i += data_unit_size) { in blk_crypto_fallback_decrypt_bio()
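
Throughout block/, bv_len drives segment accounting: blk-merge.c and blk-integrity.c refuse to grow a segment once the combined length would exceed queue_max_segment_size(), and sector counts are derived as bv_len >> 9. A hypothetical helper illustrating those two checks only (the real merge path additionally verifies physical contiguity, see blk.h's biovec_phys_mergeable() above):

#include <linux/blkdev.h>
#include <linux/bvec.h>

/* Illustration only, in the style of blk-merge.c / blk-integrity.c:
 * a bvec may be appended while the combined byte length still fits
 * the queue's max segment size; sectors are counted in 512-byte
 * units (bv_len >> 9). */
static bool can_append_bvec(struct request_queue *q, unsigned int seg_size,
			    const struct bio_vec *bv, unsigned int *sectors)
{
	if (seg_size + bv->bv_len > queue_max_segment_size(q))
		return false;

	*sectors += bv->bv_len >> 9;
	return true;
}
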
/Linux-v5.15/drivers/md/bcache/
util.c:244 start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, in bch_bio_map()
251 base += bv->bv_len; in bch_bio_map()
254 size -= bv->bv_len; in bch_bio_map()
debug.c:138 bv.bv_len), in bch_data_verify()
145 bio_advance_iter(check, &citer, bv.bv_len); in bch_data_verify()
/Linux-v5.15/fs/squashfs/
block.c:47 int bytes_to_copy = min_t(int, bvec->bv_len - offset, in copy_bio_to_actor()
65 if (offset >= bvec->bv_len) { in copy_bio_to_actor()
181 if (offset < bvec->bv_len - 1) { in squashfs_read_data()
zlib_wrapper.c:78 avail = min(length, ((int)bvec->bv_len) - offset); in zlib_uncompress()
/Linux-v5.15/drivers/s390/block/
dasd_fba.c:462 if (bv.bv_len & (blksize - 1)) in dasd_fba_build_cp_regular()
465 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_fba_build_cp_regular()
466 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp_regular()
467 cidaw += bv.bv_len / blksize; in dasd_fba_build_cp_regular()
509 memcpy(copy + bv.bv_offset, dst, bv.bv_len); in dasd_fba_build_cp_regular()
513 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_fba_build_cp_regular()
587 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_fba_free_cp()
598 memcpy(dst, cda, bv.bv_len); in dasd_fba_free_cp()
/Linux-v5.15/drivers/block/zram/
zram_drv.c:151 return bvec->bv_len != PAGE_SIZE; in is_partial_io()
186 *index += (*offset + bvec->bv_len) / PAGE_SIZE; in update_position()
187 *offset = (*offset + bvec->bv_len) % PAGE_SIZE; in update_position()
596 if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { in read_from_bdev_async()
670 bvec.bv_len = PAGE_SIZE; in writeback_store()
725 bio_add_page(&bio, bvec.bv_page, bvec.bv_len, in writeback_store()
1243 bvec.bv_len = PAGE_SIZE; in __zram_bvec_read()
1312 memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len); in zram_bvec_read()
1465 memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len); in zram_bvec_write()
1470 vec.bv_len = PAGE_SIZE; in zram_bvec_write()
[all …]
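
The zram hits above show bv_len used both for partial-page detection and for advancing the (index, offset) position within the device. The two helpers, reconstructed from the zram_drv.c fragments above:

#include <linux/bvec.h>
#include <linux/mm.h>

/* A request is "partial" when the segment does not cover a full page
 * and therefore needs a read-modify-write of the stored page. */
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/* Advance the page index and intra-page offset by the segment length. */
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
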
/Linux-v5.15/drivers/block/rsxx/
dma.c:674 unsigned int bv_len; in rsxx_dma_queue_bio() local
692 bv_len = bio->bi_iter.bi_size; in rsxx_dma_queue_bio()
694 while (bv_len > 0) { in rsxx_dma_queue_bio()
706 bv_len -= RSXX_HW_BLK_SIZE; in rsxx_dma_queue_bio()
710 bv_len = bvec.bv_len; in rsxx_dma_queue_bio()
713 while (bv_len > 0) { in rsxx_dma_queue_bio()
717 dma_len = min(bv_len, in rsxx_dma_queue_bio()
732 bv_len -= dma_len; in rsxx_dma_queue_bio()
/Linux-v5.15/drivers/xen/
biomerge.c:15 return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2; in xen_biovec_phys_mergeable()
/Linux-v5.15/net/ceph/
messenger_v2.c:153 bv.bv_len = min(iov_iter_count(it), in do_try_sendpage()
154 it->bvec->bv_len - it->iov_offset); in do_try_sendpage()
167 bv.bv_offset, bv.bv_len, in do_try_sendpage()
170 iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len); in do_try_sendpage()
235 iov_iter_bvec(&con->v2.in_iter, READ, &con->v2.in_bvec, 1, bv->bv_len); in set_in_bvec()
280 con->v2.out_bvec.bv_len); in set_out_bvec()
290 con->v2.out_bvec.bv_len = min(con->v2.out_zero, (int)PAGE_SIZE); in set_out_bvec_zero()
293 con->v2.out_bvec.bv_len); in set_out_bvec_zero()
868 bv->bv_len = len; in get_bvec_at()
903 ceph_msg_data_advance(cursor, bv.bv_len); in calc_sg_cnt_cursor()
[all …]
/Linux-v5.15/fs/9p/
vfs_addr.c:41 struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; in v9fs_fid_readpage()
165 bvec.bv_len = len; in v9fs_vfs_writepage_locked()
/Linux-v5.15/drivers/nvme/target/
io-cmd-file.c:98 bv->bv_len = sg->length; in nvmet_file_init_bvec()
168 len += req->f.bvec[bv_cnt].bv_len; in nvmet_file_execute_io()
169 total_len += req->f.bvec[bv_cnt].bv_len; in nvmet_file_execute_io()
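
nvmet's file backend builds its bio_vec array directly from the request's scatterlist, taking bv_len from sg->length, and then sums those lengths to size the I/O. A sketch of the per-entry initialization in the spirit of nvmet_file_init_bvec() above:

#include <linux/bvec.h>
#include <linux/scatterlist.h>

/* Populate one bio_vec from a scatterlist entry: same page, same
 * offset, and bv_len taken from the scatterlist segment length. */
static void init_bvec_from_sg(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}
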
