Lines Matching +full:page +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-only
5 #include <linux/fault-inject-usercopy.h>
23 base = __p + i->iov_offset; \
24 len -= (STEP); \
25 i->iov_offset += len; \
32 size_t skip = i->iov_offset; \
34 len = min(n, __p->iov_len - skip); \
36 base = __p->iov_base + skip; \
37 len -= (STEP); \
40 n -= len; \
41 if (skip < __p->iov_len) \
47 i->iov_offset = skip; \
53 unsigned skip = i->iov_offset; \
55 unsigned offset = p->bv_offset + skip; \
57 void *kaddr = kmap_local_page(p->bv_page + \
60 len = min(min(n, (size_t)(p->bv_len - skip)), \
61 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
64 len -= left; \
67 if (skip == p->bv_len) { \
71 n -= len; \
75 i->iov_offset = skip; \
83 loff_t start = i->xarray_start + i->iov_offset; \
85 XA_STATE(xas, i->xarray, index); \
87 len = PAGE_SIZE - offset_in_page(start); \
104 len -= left; \
106 n -= len; \
115 i->iov_offset += __off; \
120 if (unlikely(i->count < n)) \
121 n = i->count; \
127 i->ubuf, (I)) \
129 const struct iovec *iov = i->iov; \
134 i->nr_segs -= iov - i->iov; \
135 i->iov = iov; \
137 const struct bio_vec *bvec = i->bvec; \
142 i->nr_segs -= bvec - i->bvec; \
143 i->bvec = bvec; \
145 const struct kvec *kvec = i->kvec; \
150 i->nr_segs -= kvec - i->kvec; \
151 i->kvec = kvec; \
158 i->count -= n; \
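The iterate_buf/iovec/bvec/kvec helpers above are internal plumbing; callers only reach them through the generic copy routines. A minimal sketch (not from this file; buffer names and sizes invented) of driving the same machinery through the public ITER_KVEC API:

#include <linux/uio.h>

/* Build an ITER_KVEC over two kernel buffers and drain it with copy_from_iter(). */
static void kvec_iter_example(void)
{
	char a[8] = "payload";			/* 7 bytes + NUL */
	char b[4] = "xyz";
	struct kvec vec[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	char out[12];
	struct iov_iter from;
	size_t copied;

	/* WRITE: the iterator is the data source for copy_from_iter() */
	iov_iter_kvec(&from, WRITE, vec, 2, sizeof(a) + sizeof(b));

	/* walks the segments exactly as the iterate_*() macros above do,
	 * advancing iov_offset and decrementing count as it goes */
	copied = copy_from_iter(out, sizeof(out), &from);
	/* here copied == 12 and iov_iter_count(&from) == 0 */
}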
192 return &pipe->bufs[slot & (pipe->ring_size - 1)]; in pipe_buf()
198 struct pipe_inode_info *pipe = i->pipe; in sanity()
199 unsigned int p_head = pipe->head; in sanity()
200 unsigned int p_tail = pipe->tail; in sanity()
202 unsigned int i_head = i->head; in sanity()
205 if (i->last_offset) { in sanity()
208 goto Bad; // pipe must be non-empty in sanity()
209 if (unlikely(i_head != p_head - 1)) in sanity()
213 if (unlikely(p->offset + p->len != abs(i->last_offset))) in sanity()
221 printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset); in sanity()
223 p_head, p_tail, pipe->ring_size); in sanity()
224 for (idx = 0; idx < pipe->ring_size; idx++) in sanity()
226 pipe->bufs[idx].ops, in sanity()
227 pipe->bufs[idx].page, in sanity()
228 pipe->bufs[idx].offset, in sanity()
229 pipe->bufs[idx].len); in sanity()
237 static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size) in push_anon() argument
239 struct page *page = alloc_page(GFP_USER); in push_anon() local
240 if (page) { in push_anon()
241 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); in push_anon()
244 .page = page, in push_anon()
246 .len = size in push_anon()
249 return page; in push_anon()
252 static void push_page(struct pipe_inode_info *pipe, struct page *page, in push_page() argument
253 unsigned int offset, unsigned int size) in push_page() argument
255 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); in push_page()
258 .page = page, in push_page()
260 .len = size in push_page()
262 get_page(page); in push_page()
267 if (buf->ops == &default_pipe_buf_ops) in last_offset()
268 return buf->len; // buf->offset is 0 for those in last_offset()
270 return -(buf->offset + buf->len); in last_offset()
273 static struct page *append_pipe(struct iov_iter *i, size_t size, in append_pipe() argument
276 struct pipe_inode_info *pipe = i->pipe; in append_pipe()
277 int offset = i->last_offset; in append_pipe()
279 struct page *page; in append_pipe() local
283 buf = pipe_buf(pipe, pipe->head - 1); in append_pipe()
284 size = min_t(size_t, size, PAGE_SIZE - offset); in append_pipe()
285 buf->len += size; in append_pipe()
286 i->last_offset += size; in append_pipe()
287 i->count -= size; in append_pipe()
289 return buf->page; in append_pipe()
293 size = min_t(size_t, size, PAGE_SIZE); in append_pipe()
294 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) in append_pipe()
296 page = push_anon(pipe, size); in append_pipe()
297 if (!page) in append_pipe()
299 i->head = pipe->head - 1; in append_pipe()
300 i->last_offset = size; in append_pipe()
301 i->count -= size; in append_pipe()
302 return page; in append_pipe()
305 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, in copy_page_to_iter_pipe() argument
308 struct pipe_inode_info *pipe = i->pipe; in copy_page_to_iter_pipe()
309 unsigned int head = pipe->head; in copy_page_to_iter_pipe()
311 if (unlikely(bytes > i->count)) in copy_page_to_iter_pipe()
312 bytes = i->count; in copy_page_to_iter_pipe()
320 if (offset && i->last_offset == -offset) { // could we merge it? in copy_page_to_iter_pipe()
321 struct pipe_buffer *buf = pipe_buf(pipe, head - 1); in copy_page_to_iter_pipe()
322 if (buf->page == page) { in copy_page_to_iter_pipe()
323 buf->len += bytes; in copy_page_to_iter_pipe()
324 i->last_offset -= bytes; in copy_page_to_iter_pipe()
325 i->count -= bytes; in copy_page_to_iter_pipe()
329 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) in copy_page_to_iter_pipe()
332 push_page(pipe, page, offset, bytes); in copy_page_to_iter_pipe()
333 i->last_offset = -(offset + bytes); in copy_page_to_iter_pipe()
334 i->head = head; in copy_page_to_iter_pipe()
335 i->count -= bytes; in copy_page_to_iter_pipe()
340 * fault_in_iov_iter_readable - fault in iov iterator for reading
342 * @size: maximum length
345 * @size. For each iovec, fault in each page that constitutes the iovec.
350 * Always returns 0 for non-userspace iterators.
352 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) in fault_in_iov_iter_readable() argument
355 size_t n = min(size, iov_iter_count(i)); in fault_in_iov_iter_readable()
356 n -= fault_in_readable(i->ubuf + i->iov_offset, n); in fault_in_iov_iter_readable()
357 return size - n; in fault_in_iov_iter_readable()
359 size_t count = min(size, iov_iter_count(i)); in fault_in_iov_iter_readable()
363 size -= count; in fault_in_iov_iter_readable()
364 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { in fault_in_iov_iter_readable()
365 size_t len = min(count, p->iov_len - skip); in fault_in_iov_iter_readable()
370 ret = fault_in_readable(p->iov_base + skip, len); in fault_in_iov_iter_readable()
371 count -= len - ret; in fault_in_iov_iter_readable()
375 return count + size; in fault_in_iov_iter_readable()
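fault_in_iov_iter_readable() returns the number of bytes that could not be faulted in (0 means the whole range is accessible). The usual caller pattern, roughly the shape of the buffered-write path, is to pre-fault, copy, and loop on a short copy; a hedged sketch with the destination handling abstracted away:

#include <linux/uio.h>

/* Copy up to 'bytes' from a user-backed iterator into a kernel buffer (sketch). */
static ssize_t copy_one_chunk(struct iov_iter *from, void *dst, size_t bytes)
{
	size_t copied;

	do {
		/* nothing accessible at all: give up */
		if (fault_in_iov_iter_readable(from, bytes) == bytes)
			return -EFAULT;

		copied = copy_from_iter(dst, bytes, from);
	} while (copied == 0);	/* raced with reclaim/unmap: fault the pages in again */

	return copied;
}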
382 * fault_in_iov_iter_writeable - fault in iov iterator for writing
384 * @size: maximum length
387 * hardware page faults. This is primarily useful when we already know that
393 * Always returns 0 for non-user-space iterators.
395 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) in fault_in_iov_iter_writeable() argument
398 size_t n = min(size, iov_iter_count(i)); in fault_in_iov_iter_writeable()
399 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n); in fault_in_iov_iter_writeable()
400 return size - n; in fault_in_iov_iter_writeable()
402 size_t count = min(size, iov_iter_count(i)); in fault_in_iov_iter_writeable()
406 size -= count; in fault_in_iov_iter_writeable()
407 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { in fault_in_iov_iter_writeable()
408 size_t len = min(count, p->iov_len - skip); in fault_in_iov_iter_writeable()
413 ret = fault_in_safe_writeable(p->iov_base + skip, len); in fault_in_iov_iter_writeable()
414 count -= len - ret; in fault_in_iov_iter_writeable()
418 return count + size; in fault_in_iov_iter_writeable()
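The writeable variant is typically used before retrying a direct read whose destination pages were not resident. A short sketch; do_direct_read() is a hypothetical stand-in for the actual submission:

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t direct_read_with_prefault(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret = do_direct_read(iocb, to);		/* hypothetical helper */

	if (ret == -EFAULT &&
	    fault_in_iov_iter_writeable(to, iov_iter_count(to)) == 0)
		ret = do_direct_read(iocb, to);		/* pages are resident now; retry */
	return ret;
}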
445 struct pipe_inode_info *pipe = i->pipe; in pipe_npages()
446 int used = pipe->head - pipe->tail; in pipe_npages()
447 int off = i->last_offset; in pipe_npages()
449 *npages = max((int)pipe->max_usage - used, 0); in pipe_npages()
463 if (unlikely(bytes > i->count)) in copy_pipe_to_iter()
464 bytes = i->count; in copy_pipe_to_iter()
471 for (size_t n = bytes; n; n -= chunk) { in copy_pipe_to_iter()
472 struct page *page = append_pipe(i, n, &off); in copy_pipe_to_iter() local
473 chunk = min_t(size_t, n, PAGE_SIZE - off); in copy_pipe_to_iter()
474 if (!page) in copy_pipe_to_iter()
475 return bytes - n; in copy_pipe_to_iter()
476 memcpy_to_page(page, off, addr, chunk); in copy_pipe_to_iter()
496 if (unlikely(bytes > i->count)) in csum_and_copy_to_pipe_iter()
497 bytes = i->count; in csum_and_copy_to_pipe_iter()
505 struct page *page = append_pipe(i, bytes, &r); in csum_and_copy_to_pipe_iter() local
508 if (!page) in csum_and_copy_to_pipe_iter()
510 chunk = min_t(size_t, bytes, PAGE_SIZE - r); in csum_and_copy_to_pipe_iter()
511 p = kmap_local_page(page); in csum_and_copy_to_pipe_iter()
515 bytes -= chunk; in csum_and_copy_to_pipe_iter()
552 if (unlikely(bytes > i->count)) in copy_mc_pipe_to_iter()
553 bytes = i->count; in copy_mc_pipe_to_iter()
561 struct page *page = append_pipe(i, bytes, &off); in copy_mc_pipe_to_iter() local
565 if (!page) in copy_mc_pipe_to_iter()
567 chunk = min_t(size_t, bytes, PAGE_SIZE - off); in copy_mc_pipe_to_iter()
568 p = kmap_local_page(page); in copy_mc_pipe_to_iter()
570 chunk -= rem; in copy_mc_pipe_to_iter()
573 bytes -= chunk; in copy_mc_pipe_to_iter()
583 * _copy_mc_to_iter - copy to iter with source memory error exception handling
589 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
590 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
596 * byte-by-byte until the fault happens again. Re-triggering machine
598 * alignment and poison alignment assumptions to avoid re-triggering
657 * _copy_from_iter_flushcache - write destination through cpu cache
662 * The pmem driver arranges for filesystem-dax to use this facility via
668 * instructions that strand dirty-data in the cache.
688 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) in page_copy_sane() argument
690 struct page *head; in page_copy_sane()
694 * The general case needs to access the page order in order in page_copy_sane()
695 * to compute the page size. in page_copy_sane()
696 * However, we mostly deal with order-0 pages and thus can in page_copy_sane()
698 * page orders. in page_copy_sane()
703 head = compound_head(page); in page_copy_sane()
704 v += (page - head) << PAGE_SHIFT; in page_copy_sane()
712 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, in copy_page_to_iter() argument
716 if (unlikely(!page_copy_sane(page, offset, bytes))) in copy_page_to_iter()
719 return copy_page_to_iter_pipe(page, offset, bytes, i); in copy_page_to_iter()
720 page += offset / PAGE_SIZE; // first subpage in copy_page_to_iter()
723 void *kaddr = kmap_local_page(page); in copy_page_to_iter()
724 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); in copy_page_to_iter()
728 bytes -= n; in copy_page_to_iter()
733 page++; in copy_page_to_iter()
741 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, in copy_page_from_iter() argument
745 if (!page_copy_sane(page, offset, bytes)) in copy_page_from_iter()
747 page += offset / PAGE_SIZE; // first subpage in copy_page_from_iter()
750 void *kaddr = kmap_local_page(page); in copy_page_from_iter()
751 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); in copy_page_from_iter()
755 bytes -= n; in copy_page_from_iter()
760 page++; in copy_page_from_iter()
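copy_page_to_iter()/copy_page_from_iter() are what read/write paths use to move data between a page (including subpages of a compound page, as the offset arithmetic above handles) and the caller's buffers. A minimal sketch of the read side; names are illustrative:

#include <linux/mm.h>
#include <linux/uio.h>

/* Send 'len' bytes starting at 'offset' within 'page' to the caller's buffers. */
static ssize_t send_page_range(struct page *page, size_t offset, size_t len,
			       struct iov_iter *to)
{
	size_t copied = copy_page_to_iter(page, offset, len, to);

	/* a short copy normally means the (user) destination faulted */
	return copied ? copied : -EFAULT;
}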
772 if (unlikely(bytes > i->count)) in pipe_zero()
773 bytes = i->count; in pipe_zero()
780 for (size_t n = bytes; n; n -= chunk) { in pipe_zero()
781 struct page *page = append_pipe(i, n, &off); in pipe_zero() local
784 if (!page) in pipe_zero()
785 return bytes - n; in pipe_zero()
786 chunk = min_t(size_t, n, PAGE_SIZE - off); in pipe_zero()
787 p = kmap_local_page(page); in pipe_zero()
807 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, in copy_page_from_iter_atomic() argument
810 char *kaddr = kmap_atomic(page), *p = kaddr + offset; in copy_page_from_iter_atomic()
811 if (unlikely(!page_copy_sane(page, offset, bytes))) { in copy_page_from_iter_atomic()
829 static void pipe_advance(struct iov_iter *i, size_t size) in pipe_advance() argument
831 struct pipe_inode_info *pipe = i->pipe; in pipe_advance()
832 int off = i->last_offset; in pipe_advance()
834 if (!off && !size) { in pipe_advance()
835 pipe_discard_from(pipe, i->start_head); // discard everything in pipe_advance()
838 i->count -= size; in pipe_advance()
840 struct pipe_buffer *buf = pipe_buf(pipe, i->head); in pipe_advance()
842 size += abs(off) - buf->offset; in pipe_advance()
843 if (size <= buf->len) { in pipe_advance()
844 buf->len = size; in pipe_advance()
845 i->last_offset = last_offset(buf); in pipe_advance()
848 size -= buf->len; in pipe_advance()
849 i->head++; in pipe_advance()
852 pipe_discard_from(pipe, i->head + 1); // discard everything past this one in pipe_advance()
855 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) in iov_iter_bvec_advance() argument
859 if (!i->count) in iov_iter_bvec_advance()
861 i->count -= size; in iov_iter_bvec_advance()
863 size += i->iov_offset; in iov_iter_bvec_advance()
865 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { in iov_iter_bvec_advance()
866 if (likely(size < bvec->bv_len)) in iov_iter_bvec_advance()
868 size -= bvec->bv_len; in iov_iter_bvec_advance()
870 i->iov_offset = size; in iov_iter_bvec_advance()
871 i->nr_segs -= bvec - i->bvec; in iov_iter_bvec_advance()
872 i->bvec = bvec; in iov_iter_bvec_advance()
875 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) in iov_iter_iovec_advance() argument
879 if (!i->count) in iov_iter_iovec_advance()
881 i->count -= size; in iov_iter_iovec_advance()
883 size += i->iov_offset; // from beginning of current segment in iov_iter_iovec_advance()
884 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { in iov_iter_iovec_advance()
885 if (likely(size < iov->iov_len)) in iov_iter_iovec_advance()
887 size -= iov->iov_len; in iov_iter_iovec_advance()
889 i->iov_offset = size; in iov_iter_iovec_advance()
890 i->nr_segs -= iov - i->iov; in iov_iter_iovec_advance()
891 i->iov = iov; in iov_iter_iovec_advance()
894 void iov_iter_advance(struct iov_iter *i, size_t size) in iov_iter_advance() argument
896 if (unlikely(i->count < size)) in iov_iter_advance()
897 size = i->count; in iov_iter_advance()
899 i->iov_offset += size; in iov_iter_advance()
900 i->count -= size; in iov_iter_advance()
903 iov_iter_iovec_advance(i, size); in iov_iter_advance()
905 iov_iter_bvec_advance(i, size); in iov_iter_advance()
907 pipe_advance(i, size); in iov_iter_advance()
909 i->count -= size; in iov_iter_advance()
920 i->count += unroll; in iov_iter_revert()
922 struct pipe_inode_info *pipe = i->pipe; in iov_iter_revert()
923 unsigned int head = pipe->head; in iov_iter_revert()
925 while (head > i->start_head) { in iov_iter_revert()
926 struct pipe_buffer *b = pipe_buf(pipe, --head); in iov_iter_revert()
927 if (unroll < b->len) { in iov_iter_revert()
928 b->len -= unroll; in iov_iter_revert()
929 i->last_offset = last_offset(b); in iov_iter_revert()
930 i->head = head; in iov_iter_revert()
933 unroll -= b->len; in iov_iter_revert()
935 pipe->head--; in iov_iter_revert()
937 i->last_offset = 0; in iov_iter_revert()
938 i->head = head; in iov_iter_revert()
943 if (unroll <= i->iov_offset) { in iov_iter_revert()
944 i->iov_offset -= unroll; in iov_iter_revert()
947 unroll -= i->iov_offset; in iov_iter_revert()
954 const struct bio_vec *bvec = i->bvec; in iov_iter_revert()
956 size_t n = (--bvec)->bv_len; in iov_iter_revert()
957 i->nr_segs++; in iov_iter_revert()
959 i->bvec = bvec; in iov_iter_revert()
960 i->iov_offset = n - unroll; in iov_iter_revert()
963 unroll -= n; in iov_iter_revert()
966 const struct iovec *iov = i->iov; in iov_iter_revert()
968 size_t n = (--iov)->iov_len; in iov_iter_revert()
969 i->nr_segs++; in iov_iter_revert()
971 i->iov = iov; in iov_iter_revert()
972 i->iov_offset = n - unroll; in iov_iter_revert()
975 unroll -= n; in iov_iter_revert()
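iov_iter_advance() consumes data without copying; iov_iter_revert() walks segments backwards to un-consume, which is how e.g. networking code backs out after a failed checksummed copy. A small sketch of the peek-then-consume idiom:

#include <linux/uio.h>

/* Peek at a fixed-size header without consuming it. */
static int peek_header(struct iov_iter *from, void *hdr, size_t hdrlen)
{
	size_t copied = copy_from_iter(hdr, hdrlen, from);

	iov_iter_revert(from, copied);		/* undo the implicit advance */
	if (copied != hdrlen)
		return -EFAULT;

	/* once validated, the caller can really consume it: */
	/* iov_iter_advance(from, hdrlen); */
	return 0;
}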
986 if (i->nr_segs > 1) { in iov_iter_single_seg_count()
988 return min(i->count, i->iov->iov_len - i->iov_offset); in iov_iter_single_seg_count()
990 return min(i->count, i->bvec->bv_len - i->iov_offset); in iov_iter_single_seg_count()
992 return i->count; in iov_iter_single_seg_count()
1033 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); in iov_iter_pipe()
1038 .head = pipe->head, in iov_iter_pipe()
1039 .start_head = pipe->head, in iov_iter_pipe()
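iov_iter_pipe() (ITER_PIPE destinations still exist in this version of the file) is set up by the splice-read path and then handed to ->read_iter(). A hedged sketch modelled on the generic splice read, with error cleanup omitted:

#include <linux/fs.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>

static ssize_t splice_read_sketch(struct file *file, loff_t *ppos,
				  struct pipe_inode_info *pipe, size_t len)
{
	struct iov_iter to;
	struct kiocb kiocb;
	ssize_t ret;

	iov_iter_pipe(&to, READ, pipe, len);	/* data gets pushed into pipe buffers */
	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = *ppos;
	ret = call_read_iter(file, &kiocb, &to);
	if (ret > 0)
		*ppos = kiocb.ki_pos;
	return ret;
}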
1047 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1052 * @count: The size of the I/O buffer in bytes.
1075 * iov_iter_discard - Initialise an I/O iterator that discards data
1078 * @count: The size of the I/O buffer in bytes.
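Both constructors only describe an existing destination: iov_iter_xarray() points the iterator at pages already held in an xarray (e.g. a file's page cache), while iov_iter_discard() creates a sink that throws the data away. A minimal sketch; the offsets and lengths are invented:

#include <linux/fs.h>
#include <linux/sizes.h>
#include <linux/uio.h>

static void xarray_and_discard_example(struct address_space *mapping)
{
	struct iov_iter dest, sink;

	/* destination covering 64KiB of page cache starting at file offset 0 */
	iov_iter_xarray(&dest, READ, &mapping->i_pages, 0, SZ_64K);

	/* destination that simply drops up to 4KiB of incoming data */
	iov_iter_discard(&sink, READ, SZ_4K);
}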
1098 size_t size = i->count; in iov_iter_aligned_iovec() local
1099 size_t skip = i->iov_offset; in iov_iter_aligned_iovec()
1102 for (k = 0; k < i->nr_segs; k++, skip = 0) { in iov_iter_aligned_iovec()
1103 size_t len = i->iov[k].iov_len - skip; in iov_iter_aligned_iovec()
1105 if (len > size) in iov_iter_aligned_iovec()
1106 len = size; in iov_iter_aligned_iovec()
1109 if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask) in iov_iter_aligned_iovec()
1112 size -= len; in iov_iter_aligned_iovec()
1113 if (!size) in iov_iter_aligned_iovec()
1122 size_t size = i->count; in iov_iter_aligned_bvec() local
1123 unsigned skip = i->iov_offset; in iov_iter_aligned_bvec()
1126 for (k = 0; k < i->nr_segs; k++, skip = 0) { in iov_iter_aligned_bvec()
1127 size_t len = i->bvec[k].bv_len - skip; in iov_iter_aligned_bvec()
1129 if (len > size) in iov_iter_aligned_bvec()
1130 len = size; in iov_iter_aligned_bvec()
1133 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) in iov_iter_aligned_bvec()
1136 size -= len; in iov_iter_aligned_bvec()
1137 if (!size) in iov_iter_aligned_bvec()
1144 * iov_iter_is_aligned() - Check if the addresses and lengths of each segments
1157 if (i->count & len_mask) in iov_iter_is_aligned()
1159 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask) in iov_iter_is_aligned()
1171 size_t size = i->count; in iov_iter_is_aligned() local
1173 if (size & len_mask) in iov_iter_is_aligned()
1175 if (size && i->last_offset > 0) { in iov_iter_is_aligned()
1176 if (i->last_offset & addr_mask) in iov_iter_is_aligned()
1184 if (i->count & len_mask) in iov_iter_is_aligned()
1186 if ((i->xarray_start + i->iov_offset) & addr_mask) in iov_iter_is_aligned()
1197 size_t size = i->count; in iov_iter_alignment_iovec() local
1198 size_t skip = i->iov_offset; in iov_iter_alignment_iovec()
1201 for (k = 0; k < i->nr_segs; k++, skip = 0) { in iov_iter_alignment_iovec()
1202 size_t len = i->iov[k].iov_len - skip; in iov_iter_alignment_iovec()
1204 res |= (unsigned long)i->iov[k].iov_base + skip; in iov_iter_alignment_iovec()
1205 if (len > size) in iov_iter_alignment_iovec()
1206 len = size; in iov_iter_alignment_iovec()
1208 size -= len; in iov_iter_alignment_iovec()
1209 if (!size) in iov_iter_alignment_iovec()
1219 size_t size = i->count; in iov_iter_alignment_bvec() local
1220 unsigned skip = i->iov_offset; in iov_iter_alignment_bvec()
1223 for (k = 0; k < i->nr_segs; k++, skip = 0) { in iov_iter_alignment_bvec()
1224 size_t len = i->bvec[k].bv_len - skip; in iov_iter_alignment_bvec()
1225 res |= (unsigned long)i->bvec[k].bv_offset + skip; in iov_iter_alignment_bvec()
1226 if (len > size) in iov_iter_alignment_bvec()
1227 len = size; in iov_iter_alignment_bvec()
1229 size -= len; in iov_iter_alignment_bvec()
1230 if (!size) in iov_iter_alignment_bvec()
1239 size_t size = i->count; in iov_iter_alignment() local
1240 if (size) in iov_iter_alignment()
1241 return ((unsigned long)i->ubuf + i->iov_offset) | size; in iov_iter_alignment()
1253 size_t size = i->count; in iov_iter_alignment() local
1255 if (size && i->last_offset > 0) in iov_iter_alignment()
1256 return size | i->last_offset; in iov_iter_alignment()
1257 return size; in iov_iter_alignment()
1261 return (i->xarray_start + i->iov_offset) | i->count; in iov_iter_alignment()
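These helpers back direct-I/O gating: a driver can require every segment address and length to meet the device's alignment before taking the zero-copy route. A sketch assuming a 512-byte requirement:

#include <linux/uio.h>

/* true if every segment address and length is 512-byte aligned */
static bool dio_ok(const struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, 511, 511);
}

/* roughly equivalent check via the OR-of-everything helper */
static bool dio_ok_legacy(const struct iov_iter *iter)
{
	return (iov_iter_alignment(iter) & 511) == 0;
}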
1271 size_t size = i->count; in iov_iter_gap_alignment() local
1280 for (k = 0; k < i->nr_segs; k++) { in iov_iter_gap_alignment()
1281 if (i->iov[k].iov_len) { in iov_iter_gap_alignment()
1282 unsigned long base = (unsigned long)i->iov[k].iov_base; in iov_iter_gap_alignment()
1285 v = base + i->iov[k].iov_len; in iov_iter_gap_alignment()
1286 if (size <= i->iov[k].iov_len) in iov_iter_gap_alignment()
1288 size -= i->iov[k].iov_len; in iov_iter_gap_alignment()
1295 static int want_pages_array(struct page ***res, size_t size, in want_pages_array() argument
1298 unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE); in want_pages_array()
1304 *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in want_pages_array()
1312 struct page ***pages, size_t maxsize, unsigned maxpages, in pipe_get_pages()
1316 struct page **p; in pipe_get_pages()
1320 return -EFAULT; in pipe_get_pages()
1324 return -EFAULT; in pipe_get_pages()
1327 return -ENOMEM; in pipe_get_pages()
1329 for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) { in pipe_get_pages()
1330 struct page *page = append_pipe(i, left, &off); in pipe_get_pages() local
1331 if (!page) in pipe_get_pages()
1333 chunk = min_t(size_t, left, PAGE_SIZE - off); in pipe_get_pages()
1334 get_page(*p++ = page); in pipe_get_pages()
1337 return -EFAULT; in pipe_get_pages()
1338 return maxsize - left; in pipe_get_pages()
1341 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, in iter_xarray_populate_pages()
1345 struct page *page; in iter_xarray_populate_pages() local
1349 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in iter_xarray_populate_pages()
1350 if (xas_retry(&xas, page)) in iter_xarray_populate_pages()
1353 /* Has the page moved or been split? */ in iter_xarray_populate_pages()
1354 if (unlikely(page != xas_reload(&xas))) { in iter_xarray_populate_pages()
1359 pages[ret] = find_subpage(page, xas.xa_index); in iter_xarray_populate_pages()
1369 struct page ***pages, size_t maxsize, in iter_xarray_get_pages()
1376 pos = i->xarray_start + i->iov_offset; in iter_xarray_get_pages()
1383 return -ENOMEM; in iter_xarray_get_pages()
1384 nr = iter_xarray_populate_pages(*pages, i->xarray, index, count); in iter_xarray_get_pages()
1388 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); in iter_xarray_get_pages()
1389 i->iov_offset += maxsize; in iter_xarray_get_pages()
1390 i->count -= maxsize; in iter_xarray_get_pages()
1394 /* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
1395 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) in first_iovec_segment() argument
1401 return (unsigned long)i->ubuf + i->iov_offset; in first_iovec_segment()
1403 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { in first_iovec_segment()
1404 size_t len = i->iov[k].iov_len - skip; in first_iovec_segment()
1408 if (*size > len) in first_iovec_segment()
1409 *size = len; in first_iovec_segment()
1410 return (unsigned long)i->iov[k].iov_base + skip; in first_iovec_segment()
1415 /* must be done on non-empty ITER_BVEC one */
1416 static struct page *first_bvec_segment(const struct iov_iter *i, in first_bvec_segment()
1417 size_t *size, size_t *start) in first_bvec_segment() argument
1419 struct page *page; in first_bvec_segment() local
1420 size_t skip = i->iov_offset, len; in first_bvec_segment()
1422 len = i->bvec->bv_len - skip; in first_bvec_segment()
1423 if (*size > len) in first_bvec_segment()
1424 *size = len; in first_bvec_segment()
1425 skip += i->bvec->bv_offset; in first_bvec_segment()
1426 page = i->bvec->bv_page + skip / PAGE_SIZE; in first_bvec_segment()
1428 return page; in first_bvec_segment()
1432 struct page ***pages, size_t maxsize, in __iov_iter_get_pages_alloc()
1437 if (maxsize > i->count) in __iov_iter_get_pages_alloc()
1438 maxsize = i->count; in __iov_iter_get_pages_alloc()
1451 if (i->nofault) in __iov_iter_get_pages_alloc()
1459 return -ENOMEM; in __iov_iter_get_pages_alloc()
1463 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start); in __iov_iter_get_pages_alloc()
1468 struct page **p; in __iov_iter_get_pages_alloc()
1469 struct page *page; in __iov_iter_get_pages_alloc() local
1471 page = first_bvec_segment(i, &maxsize, start); in __iov_iter_get_pages_alloc()
1474 return -ENOMEM; in __iov_iter_get_pages_alloc()
1477 get_page(p[k] = page + k); in __iov_iter_get_pages_alloc()
1478 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start); in __iov_iter_get_pages_alloc()
1479 i->count -= maxsize; in __iov_iter_get_pages_alloc()
1480 i->iov_offset += maxsize; in __iov_iter_get_pages_alloc()
1481 if (i->iov_offset == i->bvec->bv_len) { in __iov_iter_get_pages_alloc()
1482 i->iov_offset = 0; in __iov_iter_get_pages_alloc()
1483 i->bvec++; in __iov_iter_get_pages_alloc()
1484 i->nr_segs--; in __iov_iter_get_pages_alloc()
1492 return -EFAULT; in __iov_iter_get_pages_alloc()
1496 struct page **pages, size_t maxsize, unsigned maxpages, in iov_iter_get_pages2()
1508 struct page ***pages, size_t maxsize, in iov_iter_get_pages_alloc2()
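iov_iter_get_pages2() pins the pages backing the next run of the iterator (the _alloc2 variant also allocates the page array), advances the iterator by the bytes covered, and leaves the caller holding a reference on each page. A sketch of the common bio-building shape; sizes are illustrative:

#include <linux/mm.h>
#include <linux/uio.h>

static ssize_t pin_next_chunk(struct iov_iter *iter, struct page **pages,
			      unsigned int maxpages)
{
	size_t offset;		/* offset of the data within pages[0] */
	ssize_t bytes;

	bytes = iov_iter_get_pages2(iter, pages, maxpages * PAGE_SIZE,
				    maxpages, &offset);
	if (bytes <= 0)
		return bytes;

	/* ... map pages[0..n) into a bio/sg list, then put_page() each one ... */
	return bytes;
}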
1557 sum = csum_shift(csstate->csum, csstate->off); in csum_and_copy_to_iter()
1568 csstate->csum = csum_shift(sum, csstate->off); in csum_and_copy_to_iter()
1569 csstate->off += bytes; in csum_and_copy_to_iter()
1595 size_t skip = i->iov_offset, size = i->count; in iov_npages() local
1599 for (p = i->iov; size; skip = 0, p++) { in iov_npages()
1600 unsigned offs = offset_in_page(p->iov_base + skip); in iov_npages()
1601 size_t len = min(p->iov_len - skip, size); in iov_npages()
1604 size -= len; in iov_npages()
1615 size_t skip = i->iov_offset, size = i->count; in bvec_npages() local
1619 for (p = i->bvec; size; skip = 0, p++) { in bvec_npages()
1620 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; in bvec_npages()
1621 size_t len = min(p->bv_len - skip, size); in bvec_npages()
1623 size -= len; in bvec_npages()
1633 if (unlikely(!i->count)) in iov_iter_npages()
1636 unsigned offs = offset_in_page(i->ubuf + i->iov_offset); in iov_iter_npages()
1637 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE); in iov_iter_npages()
1655 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; in iov_iter_npages()
1656 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); in iov_iter_npages()
1671 return new->bvec = kmemdup(new->bvec, in dup_iter()
1672 new->nr_segs * sizeof(struct bio_vec), in dup_iter()
1676 return new->iov = kmemdup(new->iov, in dup_iter()
1677 new->nr_segs * sizeof(struct iovec), in dup_iter()
1688 int ret = -EFAULT, i; in copy_compat_iovec_from_user()
1691 return -EFAULT; in copy_compat_iovec_from_user()
1702 ret = -EINVAL; in copy_compat_iovec_from_user()
1721 return -EFAULT; in copy_iovec_from_user()
1724 return -EINVAL; in copy_iovec_from_user()
1745 return ERR_PTR(-EINVAL); in iovec_from_user()
1749 return ERR_PTR(-ENOMEM); in iovec_from_user()
1794 return -EFAULT; in __import_iovec()
1797 if (len > MAX_RW_COUNT - total_len) { in __import_iovec()
1798 len = MAX_RW_COUNT - total_len; in __import_iovec()
1813 * import_iovec() - Copy an array of &struct iovec from userspace
1822 * on-stack) kernel array.
1829 * on-stack array was used or not (and regardless of whether this function
1849 return -EFAULT; in import_single_range()
1851 iov->iov_base = buf; in import_single_range()
1852 iov->iov_len = len; in import_single_range()
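import_iovec() and import_single_range() are the syscall-side entry points: validate the user pointers, fill in the iovec(s), and initialize the iterator in one go (import_iovec() falls back to a heap allocation when the segment count exceeds the fast on-stack array). A sketch of the single-range case for a write-style operation; do_the_write() is a hypothetical consumer:

#include <linux/uio.h>

static ssize_t write_from_user(void __user *ubuf, size_t len)
{
	struct iovec iov;
	struct iov_iter from;
	int ret;

	ret = import_single_range(WRITE, ubuf, len, &iov, &from);
	if (ret)
		return ret;		/* -EFAULT if the range fails access_ok() */

	/* 'from' now describes ubuf[0..len) as the data source */
	return do_the_write(&from);	/* hypothetical */
}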
1859 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1875 i->iov_offset = state->iov_offset; in iov_iter_restore()
1876 i->count = state->count; in iov_iter_restore()
1880 * For the *vec iters, nr_segs + iov is constant - if we increment in iov_iter_restore()
1884 size, so we can just increment the iov pointer as they are unionized. in iov_iter_restore()
1885 * ITER_BVEC _may_ be the same size on some archs, but on others it is in iov_iter_restore()
1890 i->bvec -= state->nr_segs - i->nr_segs; in iov_iter_restore()
1892 i->iov -= state->nr_segs - i->nr_segs; in iov_iter_restore()
1893 i->nr_segs = state->nr_segs; in iov_iter_restore()
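iov_iter_save_state()/iov_iter_restore() exist for callers that may have to reissue an operation with the same buffers (io_uring being the main user): snapshot count, iov_offset and nr_segs, then rewind to exactly that point later. A minimal sketch; submit_io() is hypothetical:

#include <linux/uio.h>

static ssize_t issue_with_retry(struct iov_iter *iter)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);	/* snapshot before the attempt */
	ret = submit_io(iter);			/* hypothetical; may consume part of iter */
	if (ret == -EAGAIN) {
		iov_iter_restore(iter, &state);	/* rewind, as the code above does */
		ret = submit_io(iter);
	}
	return ret;
}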