Searched refs: iov_offset (Results 1 – 13 of 13), sorted by relevance

/Linux-v5.4/lib/
  iov_iter.c
      79  size_t skip = i->iov_offset; \
     101  size_t skip = i->iov_offset; \
     134  i->iov_offset = skip; \
     173  skip = i->iov_offset;  in copy_page_to_iter_iovec()
     236  i->iov_offset = skip;  in copy_page_to_iter_iovec()
     257  skip = i->iov_offset;  in copy_page_from_iter_iovec()
     320  i->iov_offset = skip;  in copy_page_from_iter_iovec()
     330  if (i->iov_offset) {  in sanity()
     338  if (unlikely(p->offset + p->len != i->iov_offset))  in sanity()
     346  printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);  in sanity()
     [all …]

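Most of the iov_iter.c hits are one bookkeeping pattern: a local skip starts from i->iov_offset, advances as data is copied, and is written back when the step completes (the backslash-continued lines at 79/101/134 are inside the iteration macros, the rest in the copy helpers). Below is a minimal userspace sketch of that pattern, assuming a simplified iovec-only iterator; simple_iter and simple_iter_advance are illustrative names, not kernel code.

    #include <stddef.h>
    #include <sys/uio.h>

    /* Simplified stand-in for the iovec-backed case of struct iov_iter. */
    struct simple_iter {
            const struct iovec *iov;   /* current segment */
            unsigned long nr_segs;     /* segments left, including the current one */
            size_t iov_offset;         /* bytes already consumed in *iov */
            size_t count;              /* total bytes remaining */
    };

    /* Consume 'bytes' (assumed <= i->count) from the iterator, keeping a
     * local 'skip' and storing it back, as the kernel's iterate pattern does. */
    static void simple_iter_advance(struct simple_iter *i, size_t bytes)
    {
            size_t skip = i->iov_offset;

            while (bytes) {
                    size_t avail = i->iov->iov_len - skip;
                    size_t step = bytes < avail ? bytes : avail;

                    skip += step;
                    i->count -= step;
                    bytes -= step;

                    if (skip == i->iov->iov_len && i->nr_segs > 1) {
                            i->iov++;        /* segment exhausted: move on */
                            i->nr_segs--;
                            skip = 0;        /* offset restarts in the new segment */
                    }
            }
            i->iov_offset = skip;
    }
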
/Linux-v5.4/drivers/net/ethernet/google/gve/
  gve_tx.c
      95  iov[0].iov_offset = fifo->head;  in gve_tx_alloc_fifo()
     106  iov[1].iov_offset = 0; /* Start of fifo*/  in gve_tx_alloc_fifo()
     394  u64 iov_offset, u64 iov_len)  in gve_dma_sync_for_device() argument
     396  u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;  in gve_dma_sync_for_device()
     397  u64 first_page = iov_offset / PAGE_SIZE;  in gve_dma_sync_for_device()
     445  info->iov[hdr_nfrags - 1].iov_offset);  in gve_tx_add_skb()
     448  tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,  in gve_tx_add_skb()
     451  info->iov[hdr_nfrags - 1].iov_offset,  in gve_tx_add_skb()
     461  info->iov[i].iov_offset);  in gve_tx_add_skb()
     464  tx->tx_fifo.base + info->iov[i].iov_offset,  in gve_tx_add_skb()
     [all …]

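Here iov_offset is gve's own per-segment field (declared in gve.h, next result): an offset into the driver's TX FIFO rather than into a user iovec. The page-range arithmetic from the gve_dma_sync_for_device() hits is simple enough to show on its own; a sketch with an illustrative PAGE_SIZE and a hypothetical helper name:

    #include <stdint.h>

    #define PAGE_SIZE 4096u   /* illustrative; arch-defined in the kernel */

    /* Which pages does a FIFO segment [iov_offset, iov_offset + iov_len)
     * touch?  Mirrors the first_page/last_page computation at lines 396-397. */
    static void segment_page_range(uint64_t iov_offset, uint64_t iov_len,
                                   uint64_t *first_page, uint64_t *last_page)
    {
            *first_page = iov_offset / PAGE_SIZE;
            *last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
    }

For example, a 100-byte segment starting at offset 4090 gives first_page = 0 and last_page = 1, so both pages get synced for device access.
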
  gve.h
      89  u32 iov_offset; /* offset into this segment */  member

/Linux-v5.4/include/linux/
  uio.h
      37  size_t iov_offset;  member
     109  .iov_base = iter->iov->iov_base + iter->iov_offset,  in iov_iter_iovec()
     111  iter->iov->iov_len - iter->iov_offset),  in iov_iter_iovec()

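Line 37 is the definition of the field itself: struct iov_iter's offset into its current segment. The hits at lines 109 and 111 are the body of iov_iter_iovec(), which materializes the "current" iovec by re-basing the first remaining segment by iov_offset and clamping its length to the bytes still outstanding. A sketch reconstructed from those hits, with struct iov_iter abridged to the fields involved and the kernel's min() replaced by a local macro:

    #include <stddef.h>
    #include <sys/uio.h>

    /* Abridged: only the fields the two hits rely on. */
    struct iov_iter {
            size_t iov_offset;        /* bytes already consumed in the current iovec */
            size_t count;             /* total bytes remaining */
            const struct iovec *iov;  /* current segment */
    };

    #define min(a, b) ((a) < (b) ? (a) : (b))   /* stand-in for the kernel macro */

    static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
    {
            return (struct iovec) {
                    .iov_base = iter->iov->iov_base + iter->iov_offset,
                    .iov_len = min(iter->count,
                                   iter->iov->iov_len - iter->iov_offset),
            };
    }
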
/Linux-v5.4/drivers/infiniband/hw/hfi1/
  user_sdma.c
     723  u64 iov_offset = *iov_offset_ptr;  in user_sdma_txadd() local
     728  offset = offset_in_page(base + iovec->offset + iov_offset);  in user_sdma_txadd()
     729  pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>  in user_sdma_txadd()
     740  iov_offset += len;  in user_sdma_txadd()
     745  iovec->offset += iov_offset;  in user_sdma_txadd()
     747  iov_offset = 0;  in user_sdma_txadd()
     752  *iov_offset_ptr = iov_offset;  in user_sdma_txadd()
     788  u64 iov_offset = 0;  in user_sdma_send_pkts() local
     888  &queued, &data_sent, &iov_offset);  in user_sdma_send_pkts()
     901  iovec->offset += iov_offset;  in user_sdma_send_pkts()

/Linux-v5.4/block/
  bio.c
     842  if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))  in __bio_iov_bvec_add_pages()
     845  len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);  in __bio_iov_bvec_add_pages()
     847  bv->bv_offset + iter->iov_offset);  in __bio_iov_bvec_add_pages()

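For a bvec-backed iterator, as in __bio_iov_bvec_add_pages(), iov_offset plays the same role inside the current bio_vec: usable data starts at bv_offset + iov_offset within the page, and at most bv_len - iov_offset bytes remain in the segment, clamped to the iterator's count. A small standalone sketch of that bound; bvec_view and bvec_chunk are illustrative stand-ins, not kernel types:

    #include <stddef.h>

    /* Minimal stand-ins for the fields the bio.c hits rely on. */
    struct bvec_view {
            size_t bv_offset;   /* data offset within the backing page */
            size_t bv_len;      /* data length within the backing page */
    };

    /* How many bytes can be taken from the current bvec segment, and where
     * do they start inside the page?  Mirrors lines 845 and 847 above. */
    static size_t bvec_chunk(const struct bvec_view *bv, size_t iov_offset,
                             size_t count, size_t *page_off)
    {
            size_t remaining = bv->bv_len - iov_offset;          /* left in this segment */
            size_t len = remaining < count ? remaining : count;  /* min_t(size_t, ...) */

            *page_off = bv->bv_offset + iov_offset;
            return len;
    }

The WARN_ON_ONCE at line 842 is the corresponding invariant: iov_offset must never exceed the current segment's bv_len.
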
/Linux-v5.4/drivers/nvme/host/
  tcp.c
     186  return req->iter.bvec->bv_offset + req->iter.iov_offset;  in nvme_tcp_req_cur_offset()
     191  return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,  in nvme_tcp_req_cur_length()
     197  return req->iter.iov_offset;  in nvme_tcp_req_offset()
     236  req->iter.iov_offset = offset;  in nvme_tcp_init_iter()

/Linux-v5.4/net/9p/
  trans_virtio.c
     346  p = data->kvec->iov_base + data->iov_offset;  in p9_get_mapped_pages()

/Linux-v5.4/fs/
  splice.c
     313  to.iov_offset = 0;  in generic_file_splice_read()
  io_uring.c
    1229  iter->iov_offset = offset & ~PAGE_MASK;  in io_import_fixed()

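The io_uring hit seeds an iterator over a pre-registered (fixed) buffer: whole pages of the requested offset are handled by stepping over bvec entries, and only the sub-page remainder is kept in iov_offset, which is what offset & ~PAGE_MASK extracts (PAGE_MASK being ~(PAGE_SIZE - 1)). A sketch of that split with illustrative constants, not the io_uring code itself:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ul                /* illustrative; arch-defined in the kernel */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            /* Offset of an I/O into a registered buffer whose pages sit in a
             * bvec array: skip whole page-sized bvecs, keep the remainder. */
            size_t offset = 3 * PAGE_SIZE + 123;

            size_t pages_skipped = offset / PAGE_SIZE;
            size_t iov_offset = offset & ~PAGE_MASK;   /* == offset % PAGE_SIZE */

            printf("skip %zu page-sized bvecs, iov_offset = %zu\n",
                   pages_skipped, iov_offset);
            return 0;
    }

The nvme_tcp_init_iter() and loop.c lo_rw_aio() hits seed iov_offset directly for a similar reason: the transfer may begin partway into its first bvec segment.
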
/Linux-v5.4/drivers/block/
  loop.c
     559  iter.iov_offset = offset;  in lo_rw_aio()

/Linux-v5.4/fs/cifs/
  file.c
    2837  from->iov_offset, from->count);  in cifs_write_from_iter()
    3560  direct_iov.iov_offset,  in cifs_send_async_read()

/Linux-v5.4/fs/fuse/
  file.c
    1338  return (unsigned long)ii->iov->iov_base + ii->iov_offset;  in fuse_get_user_addr()