Lines matching "+full:min +full:-len" in lib/iov_iter.c (the Linux kernel's iov_iter implementation). Each match is listed with its file line number and, where applicable, its enclosing function.

1 // SPDX-License-Identifier: GPL-2.0-only
5 #include <linux/fault-inject-usercopy.h>
18 #define iterate_buf(i, n, base, len, off, __p, STEP) { \
20 len = n; \
21 base = __p + i->iov_offset; \
22 len -= (STEP); \
23 i->iov_offset += len; \
24 n = len; \
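
The STEP argument to these walker macros expands to an expression that returns the number of bytes it failed to process (the copy_to_user() convention), so "len -= (STEP)" turns the requested length into the length actually handled before the offsets advance by it. A minimal userspace sketch of that convention, with a hypothetical step_copy() helper that can only move 8 bytes per call:

#include <stdio.h>
#include <string.h>

/* Hypothetical step helper: like copy_to_user(), returns how many of
 * the requested bytes it could NOT process. */
static size_t step_copy(char *dst, const char *src, size_t len)
{
	size_t done = len < 8 ? len : 8;	/* pretend only 8 fit */

	memcpy(dst, src, done);
	return len - done;
}

int main(void)
{
	char src[] = "0123456789abcdef", dst[sizeof(src)];
	size_t off = 0, len = sizeof(src);

	len -= step_copy(dst + off, src + off, len);	/* len = bytes done */
	off += len;	/* advance only by what was processed */
	printf("advanced %zu of %zu\n", len, sizeof(src));
	return 0;
}
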
28 #define iterate_iovec(i, n, base, len, off, __p, STEP) { \
30 size_t skip = i->iov_offset; \
32 len = min(n, __p->iov_len - skip); \
33 if (likely(len)) { \
34 base = __p->iov_base + skip; \
35 len -= (STEP); \
36 off += len; \
37 skip += len; \
38 n -= len; \
39 if (skip < __p->iov_len) \
45 i->iov_offset = skip; \
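
iterate_iovec clamps every chunk with min(n, iov_len - skip): never more than the caller asked for, never past the end of the current segment, with skip reset to 0 once the first (possibly partially consumed) segment is done. A minimal userspace sketch of the same walk over a plain struct iovec array:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Gather up to n bytes from an iovec array, starting skip bytes into
 * the first segment, mirroring the iterate_iovec() clamping above. */
static size_t gather(char *dst, const struct iovec *iov, unsigned int nr,
		     size_t skip, size_t n)
{
	size_t off = 0;

	for (; n && nr; iov++, nr--, skip = 0) {
		size_t len = iov->iov_len - skip;

		if (len > n)
			len = n;	/* min(n, iov_len - skip) */
		if (!len)
			continue;	/* empty segment */
		memcpy(dst + off, (char *)iov->iov_base + skip, len);
		off += len;
		n -= len;
	}
	return off;
}

int main(void)
{
	char a[] = "hello ", b[] = "world", out[32] = "";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = 6 },
		{ .iov_base = b, .iov_len = 5 },
	};

	printf("%zu bytes: %s\n", gather(out, iov, 2, 0, sizeof(out) - 1), out);
	return 0;
}
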
49 #define iterate_bvec(i, n, base, len, off, p, STEP) { \
51 unsigned skip = i->iov_offset; \
53 unsigned offset = p->bv_offset + skip; \
55 void *kaddr = kmap_local_page(p->bv_page + \
58 len = min(min(n, (size_t)(p->bv_len - skip)), \
59 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
62 len -= left; \
63 off += len; \
64 skip += len; \
65 if (skip == p->bv_len) { \
69 n -= len; \
73 i->iov_offset = skip; \
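
The nested min() in iterate_bvec clamps each chunk twice: to the bytes left in the segment and to the bytes left in the current page, so each kmap_local_page() mapping is only ever touched within a single page. A sketch of just that arithmetic, assuming a hypothetical 4096-byte page:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

/* min(min(n, seg_len - skip), PAGE_SIZE - offset % PAGE_SIZE) */
static size_t chunk_len(size_t n, size_t seg_len, size_t skip, size_t offset)
{
	size_t in_seg = seg_len - skip;
	size_t in_page = SKETCH_PAGE_SIZE - offset % SKETCH_PAGE_SIZE;
	size_t len = n < in_seg ? n : in_seg;

	return len < in_page ? len : in_page;
}

int main(void)
{
	/* want 10000 bytes, 9000 left in the segment, 100 bytes into a
	 * page: the chunk stops at the page boundary after 3996 bytes */
	printf("%zu\n", chunk_len(10000, 9100, 100, 100));
	return 0;
}
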
77 #define iterate_xarray(i, n, base, len, __off, STEP) { \
81 loff_t start = i->xarray_start + i->iov_offset; \
83 XA_STATE(xas, i->xarray, index); \
85 len = PAGE_SIZE - offset_in_page(start); \
99 len = min(n, len); \
102 len -= left; \
103 __off += len; \
104 n -= len; \
107 offset += len; \
108 len = PAGE_SIZE; \
113 i->iov_offset += __off; \
117 #define __iterate_and_advance(i, n, base, len, off, I, K) { \
118 if (unlikely(i->count < n)) \
119 n = i->count; \
123 size_t len; \
124 iterate_buf(i, n, base, len, off, \
125 i->ubuf, (I)) \
129 size_t len; \
130 iterate_iovec(i, n, base, len, off, \
132 i->nr_segs -= iov - iter_iov(i); \
133 i->__iov = iov; \
135 const struct bio_vec *bvec = i->bvec; \
137 size_t len; \
138 iterate_bvec(i, n, base, len, off, \
140 i->nr_segs -= bvec - i->bvec; \
141 i->bvec = bvec; \
143 const struct kvec *kvec = i->kvec; \
145 size_t len; \
146 iterate_iovec(i, n, base, len, off, \
148 i->nr_segs -= kvec - i->kvec; \
149 i->kvec = kvec; \
152 size_t len; \
153 iterate_xarray(i, n, base, len, off, \
156 i->count -= n; \
159 #define iterate_and_advance(i, n, base, len, off, I, K) \
160 __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
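
__iterate_and_advance first clamps the request to i->count, dispatches to the walker that matches the iterator flavour, and charges the advanced bytes against the total afterwards; iterate_and_advance wraps the kernel-side step K as ((void)(K),0) because memcpy()-style steps cannot fail and thus always leave 0 bytes unprocessed. A reduced, single-flavour sketch of that shape:

#include <stdio.h>

/* Hypothetical one-segment iterator, standing in for struct iov_iter. */
struct sketch_iter {
	size_t off;	/* iov_offset */
	size_t count;	/* bytes remaining */
};

static size_t sketch_advance(struct sketch_iter *i, size_t n)
{
	if (i->count < n)
		n = i->count;	/* never walk past the iterator's end */
	/* per-segment walk would run here; kernel-side steps all succeed */
	i->off += n;
	i->count -= n;
	return n;
}

int main(void)
{
	struct sketch_iter it = { .off = 0, .count = 100 };

	printf("%zu\n", sketch_advance(&it, 4096));	/* clamped to 100 */
	return 0;
}
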
200 * fault_in_iov_iter_readable - fault in iov iterator for reading
210 * Always returns 0 for non-userspace iterators.
215 size_t n = min(size, iov_iter_count(i)); in fault_in_iov_iter_readable()
216 n -= fault_in_readable(i->ubuf + i->iov_offset, n); in fault_in_iov_iter_readable()
217 return size - n; in fault_in_iov_iter_readable()
219 size_t count = min(size, iov_iter_count(i)); in fault_in_iov_iter_readable()
223 size -= count; in fault_in_iov_iter_readable()
224 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { in fault_in_iov_iter_readable()
225 size_t len = min(count, p->iov_len - skip); in fault_in_iov_iter_readable()
228 if (unlikely(!len)) in fault_in_iov_iter_readable()
230 ret = fault_in_readable(p->iov_base + skip, len); in fault_in_iov_iter_readable()
231 count -= len - ret; in fault_in_iov_iter_readable()
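
fault_in_readable() returns the number of bytes it could not fault in, so after "n -= fault_in_readable(...)" the variable holds the bytes now accessible, and "size - n" is what the caller still cannot read. A sketch of that accounting with a made-up fault helper that succeeds only below byte 64:

#include <stdio.h>

/* Hypothetical stand-in for fault_in_readable(): returns the number of
 * bytes NOT faulted in, pretending offsets past 64 always fault. */
static size_t fake_fault_in(size_t start, size_t len)
{
	const size_t limit = 64;

	if (start >= limit)
		return len;
	return start + len > limit ? start + len - limit : 0;
}

static size_t fault_in_window(size_t size, size_t count, size_t start)
{
	size_t n = size < count ? size : count;	/* min(size, iov_iter_count) */

	n -= fake_fault_in(start, n);	/* n = bytes faulted in */
	return size - n;		/* bytes NOT faulted in */
}

int main(void)
{
	printf("%zu\n", fault_in_window(100, 80, 0));	/* 100 - 64 = 36 */
	return 0;
}
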
242 * fault_in_iov_iter_writeable - fault in iov iterator for writing
253 * Always returns 0 for non-user-space iterators.
258 size_t n = min(size, iov_iter_count(i)); in fault_in_iov_iter_writeable()
259 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n); in fault_in_iov_iter_writeable()
260 return size - n; in fault_in_iov_iter_writeable()
262 size_t count = min(size, iov_iter_count(i)); in fault_in_iov_iter_writeable()
266 size -= count; in fault_in_iov_iter_writeable()
267 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { in fault_in_iov_iter_writeable()
268 size_t len = min(count, p->iov_len - skip); in fault_in_iov_iter_writeable()
271 if (unlikely(!len)) in fault_in_iov_iter_writeable()
273 ret = fault_in_safe_writeable(p->iov_base + skip, len); in fault_in_iov_iter_writeable()
274 count -= len - ret; in fault_in_iov_iter_writeable()
303 static __wsum csum_and_memcpy(void *to, const void *from, size_t len, in csum_and_memcpy()
306 __wsum next = csum_partial_copy_nocheck(from, to, len); in csum_and_memcpy()
312 if (WARN_ON_ONCE(i->data_source)) in _copy_to_iter()
316 iterate_and_advance(i, bytes, base, len, off, in _copy_to_iter()
317 copyout(base, addr + off, len), in _copy_to_iter()
318 memcpy(base, addr + off, len) in _copy_to_iter()
336 * _copy_mc_to_iter - copy to iter with source memory error exception handling
342 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
343 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
349 * byte-by-byte until the fault happens again. Re-triggering machine
351 * alignment and poison alignment assumptions to avoid re-triggering
361 if (WARN_ON_ONCE(i->data_source)) in _copy_mc_to_iter()
365 __iterate_and_advance(i, bytes, base, len, off, in _copy_mc_to_iter()
366 copyout_mc(base, addr + off, len), in _copy_mc_to_iter()
367 copy_mc_to_kernel(base, addr + off, len) in _copy_mc_to_iter()
385 if (WARN_ON_ONCE(!i->data_source)) in _copy_from_iter()
390 iterate_and_advance(i, bytes, base, len, off, in _copy_from_iter()
391 copyin(addr + off, base, len), in _copy_from_iter()
392 memcpy_from_iter(i, addr + off, base, len) in _copy_from_iter()
401 if (WARN_ON_ONCE(!i->data_source)) in _copy_from_iter_nocache()
404 iterate_and_advance(i, bytes, base, len, off, in _copy_from_iter_nocache()
405 __copy_from_user_inatomic_nocache(addr + off, base, len), in _copy_from_iter_nocache()
406 memcpy(addr + off, base, len) in _copy_from_iter_nocache()
415 * _copy_from_iter_flushcache - write destination through cpu cache
420 * The pmem driver arranges for filesystem-dax to use this facility via
426 * instructions that strand dirty-data in the cache.
432 if (WARN_ON_ONCE(!i->data_source)) in _copy_from_iter_flushcache()
435 iterate_and_advance(i, bytes, base, len, off, in _copy_from_iter_flushcache()
436 __copy_from_user_flushcache(addr + off, base, len), in _copy_from_iter_flushcache()
437 memcpy_flushcache(addr + off, base, len) in _copy_from_iter_flushcache()
453 * However, we mostly deal with order-0 pages and thus can in page_copy_sane()
461 v += (page - head) << PAGE_SHIFT; in page_copy_sane()
474 if (WARN_ON_ONCE(i->data_source)) in copy_page_to_iter()
480 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); in copy_page_to_iter()
484 bytes -= n; in copy_page_to_iter()
504 if (WARN_ON_ONCE(i->data_source)) in copy_page_to_iter_nofault()
510 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); in copy_page_to_iter_nofault()
512 iterate_and_advance(i, n, base, len, off, in copy_page_to_iter_nofault()
513 copyout_nofault(base, kaddr + offset + off, len), in copy_page_to_iter_nofault()
514 memcpy(base, kaddr + offset + off, len) in copy_page_to_iter_nofault()
518 bytes -= n; in copy_page_to_iter_nofault()
541 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); in copy_page_from_iter()
545 bytes -= n; in copy_page_from_iter()
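
copy_page_to_iter() and copy_page_from_iter() both loop in chunks of min(bytes, PAGE_SIZE - offset) so no single copy crosses a page boundary; after the first chunk the offset resets to 0 and the page pointer moves on. The same loop shape in userspace, page size assumed to be 4096:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

int main(void)
{
	size_t bytes = 10000, offset = 300, page = 0;

	while (bytes) {
		size_t n = bytes;
		size_t room = SKETCH_PAGE_SIZE - offset;

		if (n > room)
			n = room;	/* min(bytes, PAGE_SIZE - offset) */
		printf("page %zu: %zu bytes at offset %zu\n", page, n, offset);
		bytes -= n;
		offset = 0;	/* later pages start at the beginning */
		page++;
	}
	return 0;
}
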
560 iterate_and_advance(i, bytes, base, len, count, in iov_iter_zero()
561 clear_user(base, len), in iov_iter_zero()
562 memset(base, 0, len) in iov_iter_zero()
576 if (WARN_ON_ONCE(!i->data_source)) in copy_page_from_iter_atomic()
582 n = bytes - copied; in copy_page_from_iter_atomic()
586 n = min_t(size_t, n, PAGE_SIZE - offset); in copy_page_from_iter_atomic()
590 iterate_and_advance(i, n, base, len, off, in copy_page_from_iter_atomic()
591 copyin(p + off, base, len), in copy_page_from_iter_atomic()
592 memcpy_from_iter(i, p + off, base, len) in copy_page_from_iter_atomic()
607 if (!i->count) in iov_iter_bvec_advance()
609 i->count -= size; in iov_iter_bvec_advance()
611 size += i->iov_offset; in iov_iter_bvec_advance()
613 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { in iov_iter_bvec_advance()
614 if (likely(size < bvec->bv_len)) in iov_iter_bvec_advance()
616 size -= bvec->bv_len; in iov_iter_bvec_advance()
618 i->iov_offset = size; in iov_iter_bvec_advance()
619 i->nr_segs -= bvec - i->bvec; in iov_iter_bvec_advance()
620 i->bvec = bvec; in iov_iter_bvec_advance()
627 if (!i->count) in iov_iter_iovec_advance()
629 i->count -= size; in iov_iter_iovec_advance()
631 size += i->iov_offset; // from beginning of current segment in iov_iter_iovec_advance()
632 for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) { in iov_iter_iovec_advance()
633 if (likely(size < iov->iov_len)) in iov_iter_iovec_advance()
635 size -= iov->iov_len; in iov_iter_iovec_advance()
637 i->iov_offset = size; in iov_iter_iovec_advance()
638 i->nr_segs -= iov - iter_iov(i); in iov_iter_iovec_advance()
639 i->__iov = iov; in iov_iter_iovec_advance()
644 if (unlikely(i->count < size)) in iov_iter_advance()
645 size = i->count; in iov_iter_advance()
647 i->iov_offset += size; in iov_iter_advance()
648 i->count -= size; in iov_iter_advance()
655 i->count -= size; in iov_iter_advance()
666 i->count += unroll; in iov_iter_revert()
669 if (unroll <= i->iov_offset) { in iov_iter_revert()
670 i->iov_offset -= unroll; in iov_iter_revert()
673 unroll -= i->iov_offset; in iov_iter_revert()
680 const struct bio_vec *bvec = i->bvec; in iov_iter_revert()
682 size_t n = (--bvec)->bv_len; in iov_iter_revert()
683 i->nr_segs++; in iov_iter_revert()
685 i->bvec = bvec; in iov_iter_revert()
686 i->iov_offset = n - unroll; in iov_iter_revert()
689 unroll -= n; in iov_iter_revert()
694 size_t n = (--iov)->iov_len; in iov_iter_revert()
695 i->nr_segs++; in iov_iter_revert()
697 i->__iov = iov; in iov_iter_revert()
698 i->iov_offset = n - unroll; in iov_iter_revert()
701 unroll -= n; in iov_iter_revert()
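
Advancing walks the segment list forward, consuming whole segment lengths until the residue fits inside one segment (that residue becomes iov_offset); iov_iter_revert() walks the same list backwards, which is why it can recompute iov_offset as "n - unroll" once the unroll amount fits inside a segment. A sketch of both directions over a plain array of segment lengths:

#include <stdio.h>

static void advance(const size_t *len, size_t *seg, size_t *off, size_t size)
{
	size += *off;			/* from start of current segment */
	while (size >= len[*seg]) {	/* consume whole segments */
		size -= len[*seg];
		(*seg)++;
	}
	*off = size;
}

static void revert(const size_t *len, size_t *seg, size_t *off, size_t unroll)
{
	if (unroll <= *off) {		/* still inside this segment */
		*off -= unroll;
		return;
	}
	unroll -= *off;
	for (;;) {			/* walk segments backwards */
		size_t n = len[--(*seg)];

		if (n >= unroll) {
			*off = n - unroll;
			return;
		}
		unroll -= n;
	}
}

int main(void)
{
	size_t len[] = { 10, 20, 30 }, seg = 0, off = 0;

	advance(len, &seg, &off, 35);
	printf("seg %zu off %zu\n", seg, off);	/* seg 2 off 5 */
	revert(len, &seg, &off, 15);
	printf("seg %zu off %zu\n", seg, off);	/* seg 1 off 10 */
	return 0;
}
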
712 if (i->nr_segs > 1) { in iov_iter_single_seg_count()
714 return min(i->count, iter_iov(i)->iov_len - i->iov_offset); in iov_iter_single_seg_count()
716 return min(i->count, i->bvec->bv_len - i->iov_offset); in iov_iter_single_seg_count()
718 return i->count; in iov_iter_single_seg_count()
757 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
786 * iov_iter_discard - Initialise an I/O iterator that discards data
810 size_t size = i->count; in iov_iter_aligned_iovec()
811 size_t skip = i->iov_offset; in iov_iter_aligned_iovec()
814 for (k = 0; k < i->nr_segs; k++, skip = 0) { in iov_iter_aligned_iovec()
816 size_t len = iov->iov_len - skip; in iov_iter_aligned_iovec()
818 if (len > size) in iov_iter_aligned_iovec()
819 len = size; in iov_iter_aligned_iovec()
820 if (len & len_mask) in iov_iter_aligned_iovec()
822 if ((unsigned long)(iov->iov_base + skip) & addr_mask) in iov_iter_aligned_iovec()
825 size -= len; in iov_iter_aligned_iovec()
835 size_t size = i->count; in iov_iter_aligned_bvec()
836 unsigned skip = i->iov_offset; in iov_iter_aligned_bvec()
839 for (k = 0; k < i->nr_segs; k++, skip = 0) { in iov_iter_aligned_bvec()
840 size_t len = i->bvec[k].bv_len - skip; in iov_iter_aligned_bvec()
842 if (len > size) in iov_iter_aligned_bvec()
843 len = size; in iov_iter_aligned_bvec()
844 if (len & len_mask) in iov_iter_aligned_bvec()
846 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) in iov_iter_aligned_bvec()
849 size -= len; in iov_iter_aligned_bvec()
857 * iov_iter_is_aligned() - Check if the addresses and lengths of each segments
870 if (i->count & len_mask) in iov_iter_is_aligned()
872 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask) in iov_iter_is_aligned()
884 if (i->count & len_mask) in iov_iter_is_aligned()
886 if ((i->xarray_start + i->iov_offset) & addr_mask) in iov_iter_is_aligned()
897 size_t size = i->count; in iov_iter_alignment_iovec()
898 size_t skip = i->iov_offset; in iov_iter_alignment_iovec()
901 for (k = 0; k < i->nr_segs; k++, skip = 0) { in iov_iter_alignment_iovec()
903 size_t len = iov->iov_len - skip; in iov_iter_alignment_iovec()
904 if (len) { in iov_iter_alignment_iovec()
905 res |= (unsigned long)iov->iov_base + skip; in iov_iter_alignment_iovec()
906 if (len > size) in iov_iter_alignment_iovec()
907 len = size; in iov_iter_alignment_iovec()
908 res |= len; in iov_iter_alignment_iovec()
909 size -= len; in iov_iter_alignment_iovec()
920 size_t size = i->count; in iov_iter_alignment_bvec()
921 unsigned skip = i->iov_offset; in iov_iter_alignment_bvec()
924 for (k = 0; k < i->nr_segs; k++, skip = 0) { in iov_iter_alignment_bvec()
925 size_t len = i->bvec[k].bv_len - skip; in iov_iter_alignment_bvec()
926 res |= (unsigned long)i->bvec[k].bv_offset + skip; in iov_iter_alignment_bvec()
927 if (len > size) in iov_iter_alignment_bvec()
928 len = size; in iov_iter_alignment_bvec()
929 res |= len; in iov_iter_alignment_bvec()
930 size -= len; in iov_iter_alignment_bvec()
940 size_t size = i->count; in iov_iter_alignment()
942 return ((unsigned long)i->ubuf + i->iov_offset) | size; in iov_iter_alignment()
954 return (i->xarray_start + i->iov_offset) | i->count; in iov_iter_alignment()
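
The alignment helpers never test each segment against a mask directly; they OR every base address and every (clamped) length into one accumulator, and the caller checks the low bits of the result once, since any misaligned address or length leaves a low bit set. A sketch of the accumulation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t bases[] = { 0x1000, 0x2200, 0x3400 };
	size_t lens[] = { 512, 512, 256 };
	uintptr_t res = 0;

	for (int k = 0; k < 3; k++) {
		res |= bases[k];	/* misaligned address sets low bits */
		res |= lens[k];		/* misaligned length sets low bits */
	}
	/* everything is 512-byte aligned iff (res & 511) == 0; here the
	 * 256-byte length leaves bit 8 set */
	printf("res & 511 = %#lx\n", (unsigned long)(res & 511));
	return 0;
}
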
964 size_t size = i->count; in iov_iter_gap_alignment()
973 for (k = 0; k < i->nr_segs; k++) { in iov_iter_gap_alignment()
975 if (iov->iov_len) { in iov_iter_gap_alignment()
976 unsigned long base = (unsigned long)iov->iov_base; in iov_iter_gap_alignment()
979 v = base + iov->iov_len; in iov_iter_gap_alignment()
980 if (size <= iov->iov_len) in iov_iter_gap_alignment()
982 size -= iov->iov_len; in iov_iter_gap_alignment()
1040 pos = i->xarray_start + i->iov_offset; in iter_xarray_get_pages()
1047 return -ENOMEM; in iter_xarray_get_pages()
1048 nr = iter_xarray_populate_pages(*pages, i->xarray, index, count); in iter_xarray_get_pages()
1052 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); in iter_xarray_get_pages()
1053 i->iov_offset += maxsize; in iter_xarray_get_pages()
1054 i->count -= maxsize; in iter_xarray_get_pages()
1058 /* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
1065 return (unsigned long)i->ubuf + i->iov_offset; in first_iovec_segment()
1067 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { in first_iovec_segment()
1069 size_t len = iov->iov_len - skip; in first_iovec_segment()
1071 if (unlikely(!len)) in first_iovec_segment()
1073 if (*size > len) in first_iovec_segment()
1074 *size = len; in first_iovec_segment()
1075 return (unsigned long)iov->iov_base + skip; in first_iovec_segment()
1080 /* must be done on non-empty ITER_BVEC one */
1085 size_t skip = i->iov_offset, len; in first_bvec_segment()
1087 len = i->bvec->bv_len - skip; in first_bvec_segment()
1088 if (*size > len) in first_bvec_segment()
1089 *size = len; in first_bvec_segment()
1090 skip += i->bvec->bv_offset; in first_bvec_segment()
1091 page = i->bvec->bv_page + skip / PAGE_SIZE; in first_bvec_segment()
1102 if (maxsize > i->count) in __iov_iter_get_pages_alloc()
1103 maxsize = i->count; in __iov_iter_get_pages_alloc()
1115 if (i->nofault) in __iov_iter_get_pages_alloc()
1123 return -ENOMEM; in __iov_iter_get_pages_alloc()
1127 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start); in __iov_iter_get_pages_alloc()
1138 return -ENOMEM; in __iov_iter_get_pages_alloc()
1142 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start); in __iov_iter_get_pages_alloc()
1143 i->count -= maxsize; in __iov_iter_get_pages_alloc()
1144 i->iov_offset += maxsize; in __iov_iter_get_pages_alloc()
1145 if (i->iov_offset == i->bvec->bv_len) { in __iov_iter_get_pages_alloc()
1146 i->iov_offset = 0; in __iov_iter_get_pages_alloc()
1147 i->bvec++; in __iov_iter_get_pages_alloc()
1148 i->nr_segs--; in __iov_iter_get_pages_alloc()
1154 return -EFAULT; in __iov_iter_get_pages_alloc()
1171 ssize_t len; in iov_iter_get_pages_alloc2()
1175 len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start); in iov_iter_get_pages_alloc2()
1176 if (len <= 0) { in iov_iter_get_pages_alloc2()
1180 return len; in iov_iter_get_pages_alloc2()
1189 if (WARN_ON_ONCE(!i->data_source)) in csum_and_copy_from_iter()
1192 iterate_and_advance(i, bytes, base, len, off, ({ in csum_and_copy_from_iter()
1193 next = csum_and_copy_from_user(base, addr + off, len); in csum_and_copy_from_iter()
1195 next ? 0 : len; in csum_and_copy_from_iter()
1197 sum = csum_and_memcpy(addr + off, base, len, sum, off); in csum_and_copy_from_iter()
1211 if (WARN_ON_ONCE(i->data_source)) in csum_and_copy_to_iter()
1214 // can't use csum_memcpy() for that one - data is not copied in csum_and_copy_to_iter()
1215 csstate->csum = csum_block_add(csstate->csum, in csum_and_copy_to_iter()
1217 csstate->off); in csum_and_copy_to_iter()
1218 csstate->off += bytes; in csum_and_copy_to_iter()
1222 sum = csum_shift(csstate->csum, csstate->off); in csum_and_copy_to_iter()
1223 iterate_and_advance(i, bytes, base, len, off, ({ in csum_and_copy_to_iter()
1224 next = csum_and_copy_to_user(addr + off, base, len); in csum_and_copy_to_iter()
1226 next ? 0 : len; in csum_and_copy_to_iter()
1228 sum = csum_and_memcpy(base, addr + off, len, sum, off); in csum_and_copy_to_iter()
1231 csstate->csum = csum_shift(sum, csstate->off); in csum_and_copy_to_iter()
1232 csstate->off += bytes; in csum_and_copy_to_iter()
1258 size_t skip = i->iov_offset, size = i->count; in iov_npages()
1263 unsigned offs = offset_in_page(p->iov_base + skip); in iov_npages()
1264 size_t len = min(p->iov_len - skip, size); in iov_npages()
1266 if (len) { in iov_npages()
1267 size -= len; in iov_npages()
1268 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); in iov_npages()
1278 size_t skip = i->iov_offset, size = i->count; in bvec_npages()
1282 for (p = i->bvec; size; skip = 0, p++) { in bvec_npages()
1283 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; in bvec_npages()
1284 size_t len = min(p->bv_len - skip, size); in bvec_npages()
1286 size -= len; in bvec_npages()
1287 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); in bvec_npages()
1296 if (unlikely(!i->count)) in iov_iter_npages()
1299 unsigned offs = offset_in_page(i->ubuf + i->iov_offset); in iov_iter_npages()
1300 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE); in iov_iter_npages()
1301 return min(npages, maxpages); in iov_iter_npages()
1309 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; in iov_iter_npages()
1310 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); in iov_iter_npages()
1311 return min(npages, maxpages); in iov_iter_npages()
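
All the page-count paths reduce to DIV_ROUND_UP(offset_in_first_page + len, PAGE_SIZE): the same length costs one extra page when it starts near the end of a page. A sketch, again assuming 4096-byte pages:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t len = 4096;

	printf("offs 0:    %zu pages\n",
	       DIV_ROUND_UP((size_t)0 + len, SKETCH_PAGE_SIZE));	/* 1 */
	printf("offs 4095: %zu pages\n",
	       DIV_ROUND_UP((size_t)4095 + len, SKETCH_PAGE_SIZE));	/* 2 */
	return 0;
}
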
1321 return new->bvec = kmemdup(new->bvec, in dup_iter()
1322 new->nr_segs * sizeof(struct bio_vec), in dup_iter()
1326 return new->__iov = kmemdup(new->__iov, in dup_iter()
1327 new->nr_segs * sizeof(struct iovec), in dup_iter()
1338 int ret = -EFAULT, i; in copy_compat_iovec_from_user()
1341 return -EFAULT; in copy_compat_iovec_from_user()
1345 compat_ssize_t len; in copy_compat_iovec_from_user()
1347 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); in copy_compat_iovec_from_user()
1351 if (len < 0) { in copy_compat_iovec_from_user()
1352 ret = -EINVAL; in copy_compat_iovec_from_user()
1356 iov[i].iov_len = len; in copy_compat_iovec_from_user()
1368 int ret = -EFAULT; in copy_iovec_from_user()
1371 return -EFAULT; in copy_iovec_from_user()
1375 ssize_t len; in copy_iovec_from_user()
1377 unsafe_get_user(len, &uiov->iov_len, uaccess_end); in copy_iovec_from_user()
1378 unsafe_get_user(buf, &uiov->iov_base, uaccess_end); in copy_iovec_from_user()
1381 if (unlikely(len < 0)) { in copy_iovec_from_user()
1382 ret = -EINVAL; in copy_iovec_from_user()
1385 iov->iov_base = buf; in copy_iovec_from_user()
1386 iov->iov_len = len; in copy_iovec_from_user()
1389 } while (--nr_segs); in copy_iovec_from_user()
1412 return ERR_PTR(-EINVAL); in iovec_from_user()
1416 return ERR_PTR(-ENOMEM); in iovec_from_user()
1449 ret = import_ubuf(type, iov->iov_base, iov->iov_len, i); in __import_iovec_ubuf()
1453 return i->count; in __import_iovec_ubuf()
1482 ssize_t len = (ssize_t)iov[seg].iov_len; in __import_iovec()
1484 if (!access_ok(iov[seg].iov_base, len)) { in __import_iovec()
1488 return -EFAULT; in __import_iovec()
1491 if (len > MAX_RW_COUNT - total_len) { in __import_iovec()
1492 len = MAX_RW_COUNT - total_len; in __import_iovec()
1493 iov[seg].iov_len = len; in __import_iovec()
1495 total_len += len; in __import_iovec()
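
Rather than failing when the iovec lengths would sum past MAX_RW_COUNT, __import_iovec() shortens the offending segment so the running total lands exactly on the limit, and every later segment is truncated to zero. A sketch of the clamp with a made-up limit of 100 bytes:

#include <stdio.h>

#define SKETCH_MAX_RW_COUNT 100u

int main(void)
{
	size_t lens[] = { 60, 60, 60 };
	size_t total_len = 0;

	for (int seg = 0; seg < 3; seg++) {
		size_t len = lens[seg];

		if (len > SKETCH_MAX_RW_COUNT - total_len) {
			len = SKETCH_MAX_RW_COUNT - total_len;
			lens[seg] = len;	/* truncate in place */
		}
		total_len += len;
	}
	printf("total %zu, segs %zu/%zu/%zu\n",
	       total_len, lens[0], lens[1], lens[2]);	/* 100, 60/40/0 */
	return 0;
}
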
1507 * import_iovec() - Copy an array of &struct iovec from userspace
1516 * on-stack) kernel array.
1523 * on-stack array was used or not (and regardless of whether this function
1537 int import_single_range(int rw, void __user *buf, size_t len, in import_single_range()
1540 if (len > MAX_RW_COUNT) in import_single_range()
1541 len = MAX_RW_COUNT; in import_single_range()
1542 if (unlikely(!access_ok(buf, len))) in import_single_range()
1543 return -EFAULT; in import_single_range()
1545 iov_iter_ubuf(i, rw, buf, len); in import_single_range()
1550 int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i) in import_ubuf()
1552 if (len > MAX_RW_COUNT) in import_ubuf()
1553 len = MAX_RW_COUNT; in import_ubuf()
1554 if (unlikely(!access_ok(buf, len))) in import_ubuf()
1555 return -EFAULT; in import_ubuf()
1557 iov_iter_ubuf(i, rw, buf, len); in import_ubuf()
1563 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1579 i->iov_offset = state->iov_offset; in iov_iter_restore()
1580 i->count = state->count; in iov_iter_restore()
1584 * For the *vec iters, nr_segs + iov is constant - if we increment in iov_iter_restore()
1594 i->bvec -= state->nr_segs - i->nr_segs; in iov_iter_restore()
1596 i->__iov -= state->nr_segs - i->nr_segs; in iov_iter_restore()
1597 i->nr_segs = state->nr_segs; in iov_iter_restore()
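
As the comment at line 1584 notes, advancing keeps "segment pointer + nr_segs" constant, so restoring only needs the saved nr_segs: subtracting the difference rewinds the pointer to exactly where it was. A sketch with a plain array standing in for the segment list:

#include <stdio.h>

int main(void)
{
	int segs[5] = { 10, 11, 12, 13, 14 };
	int *cur = segs;
	size_t nr_segs = 5;
	size_t saved_nr_segs = nr_segs;	/* as saved by iov_iter_save_state() */

	cur += 3;			/* advance over three segments */
	nr_segs -= 3;

	cur -= saved_nr_segs - nr_segs;	/* the iov_iter_restore() rewind */
	nr_segs = saved_nr_segs;
	printf("back at segment %d with %zu segs\n", *cur, nr_segs);
	return 0;
}
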
1612 loff_t pos = i->xarray_start + i->iov_offset; in iov_iter_extract_xarray_pages()
1614 XA_STATE(xas, i->xarray, index); in iov_iter_extract_xarray_pages()
1621 return -ENOMEM; in iov_iter_extract_xarray_pages()
1641 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); in iov_iter_extract_xarray_pages()
1657 size_t skip = i->iov_offset, offset, size; in iov_iter_extract_bvec_pages()
1661 if (i->nr_segs == 0) in iov_iter_extract_bvec_pages()
1663 size = min(maxsize, i->bvec->bv_len - skip); in iov_iter_extract_bvec_pages()
1666 i->iov_offset = 0; in iov_iter_extract_bvec_pages()
1667 i->nr_segs--; in iov_iter_extract_bvec_pages()
1668 i->bvec++; in iov_iter_extract_bvec_pages()
1672 skip += i->bvec->bv_offset; in iov_iter_extract_bvec_pages()
1673 page = i->bvec->bv_page + skip / PAGE_SIZE; in iov_iter_extract_bvec_pages()
1679 return -ENOMEM; in iov_iter_extract_bvec_pages()
1684 size = min_t(size_t, size, maxpages * PAGE_SIZE - offset); in iov_iter_extract_bvec_pages()
1701 size_t skip = i->iov_offset, offset, len, size; in iov_iter_extract_kvec_pages()
1705 if (i->nr_segs == 0) in iov_iter_extract_kvec_pages()
1707 size = min(maxsize, i->kvec->iov_len - skip); in iov_iter_extract_kvec_pages()
1710 i->iov_offset = 0; in iov_iter_extract_kvec_pages()
1711 i->nr_segs--; in iov_iter_extract_kvec_pages()
1712 i->kvec++; in iov_iter_extract_kvec_pages()
1716 kaddr = i->kvec->iov_base + skip; in iov_iter_extract_kvec_pages()
1722 return -ENOMEM; in iov_iter_extract_kvec_pages()
1725 kaddr -= offset; in iov_iter_extract_kvec_pages()
1726 len = offset + size; in iov_iter_extract_kvec_pages()
1728 size_t seg = min_t(size_t, len, PAGE_SIZE); in iov_iter_extract_kvec_pages()
1736 len -= seg; in iov_iter_extract_kvec_pages()
1740 size = min_t(size_t, size, maxpages * PAGE_SIZE - offset); in iov_iter_extract_kvec_pages()
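
iov_iter_extract_kvec_pages() rounds the kernel virtual address down to a page boundary ("kaddr -= offset"), then walks "len = offset + size" bytes in min(len, PAGE_SIZE) steps, so the first and last chunks may be partial pages. A sketch of that chunking, page size assumed 4096:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

int main(void)
{
	size_t offset = 1000;		/* start offset within the first page */
	size_t size = 6000;		/* payload bytes */
	size_t len = offset + size;	/* span measured from the page boundary */

	while (len) {
		size_t seg = len < SKETCH_PAGE_SIZE ? len : SKETCH_PAGE_SIZE;

		printf("chunk %zu bytes, payload %zu\n", seg, seg - offset);
		len -= seg;
		offset = 0;	/* only the first chunk is offset */
	}
	return 0;
}
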
1747 * each of them. This should only be used if the iterator is user-backed
1769 if (i->data_source == ITER_DEST) in iov_iter_extract_user_pages()
1773 if (i->nofault) in iov_iter_extract_user_pages()
1781 return -ENOMEM; in iov_iter_extract_user_pages()
1785 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset); in iov_iter_extract_user_pages()
1791 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
1808 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
1816 * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
1831 * It may also return -ENOMEM and -EFAULT.
1840 maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT); in iov_iter_extract_pages()
1860 return -EFAULT; in iov_iter_extract_pages()