Lines Matching full:pages

219 * Pages that were pinned via pin_user_pages*() must be released via either
221 * that such pages can be separately tracked and uniquely handled. In
231 * For devmap managed pages we need to catch refcount transition from in unpin_user_page()
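The fragments above state the core API contract: a page pinned with pin_user_pages*() carries a FOLL_PIN reference and must be released with unpin_user_page() or unpin_user_pages*(), never put_page(), so that pinned pages can be tracked and handled separately. A minimal sketch of that pairing, assuming the pin_user_pages_fast()/unpin_user_page() signatures shown later in this listing; the helper names are illustrative, not part of gup.c:

#include <linux/mm.h>

/* Illustrative: pin a single user page for a short-lived DMA operation. */
static struct page *pin_one_user_page(unsigned long uaddr, bool write)
{
        struct page *page;
        int ret;

        ret = pin_user_pages_fast(uaddr, 1, write ? FOLL_WRITE : 0, &page);
        return ret == 1 ? page : NULL;
}

static void release_one_user_page(struct page *page)
{
        /* Pair with pin_user_pages*(); put_page() would be a bug here. */
        unpin_user_page(page);
}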
252 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
253 * @pages: array of pages to be maybe marked dirty, and definitely released.
254 * @npages: number of pages in the @pages array.
255 * @make_dirty: whether to mark the pages dirty
260 * For each page in the @pages array, make that page (or its head page, if a
262 * listed as clean. In any case, releases all pages using unpin_user_page(),
273 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
279 * TODO: this can be optimized for huge pages: if a series of pages is in unpin_user_pages_dirty_lock()
285 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
290 struct page *page = compound_head(pages[index]); in unpin_user_pages_dirty_lock()
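unpin_user_pages_dirty_lock() is the release path for the common case where a device has DMA'd into pinned user pages: with make_dirty set it marks each page (or its head page) dirty via set_page_dirty_lock() before unpinning. A hedged sketch of that pattern, assuming the buffer was pinned with FOLL_WRITE via pin_user_pages_fast(); the function and variable names are illustrative:

#include <linux/mm.h>

/* Illustrative: pin a user buffer the device writes into, then release. */
static int dma_read_into_user(unsigned long uaddr, int npages)
{
        struct page **pages;
        int pinned;

        pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
        if (pinned < 0) {
                kvfree(pages);
                return pinned;
        }
        if (pinned != npages) {
                /* Partial pin: drop what we got and let the caller retry. */
                unpin_user_pages(pages, pinned);
                kvfree(pages);
                return -EFAULT;
        }

        /* ... program the device to DMA into the pinned pages ... */

        /* The device wrote to the pages: dirty and unpin in one call. */
        unpin_user_pages_dirty_lock(pages, pinned, true);
        kvfree(pages);
        return 0;
}

For a buffer the device only read from, unpin_user_pages() (or make_dirty == false) is the matching release.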
319 * unpin_user_pages() - release an array of gup-pinned pages.
320 * @pages: array of pages to be marked dirty and released.
321 * @npages: number of pages in the @pages array.
323 * For each page in the @pages array, release the page using unpin_user_page().
327 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
332 * If this WARN_ON() fires, then the system *might* be leaking pages (by in unpin_user_pages()
339 * TODO: this can be optimized for huge pages: if a series of pages is in unpin_user_pages()
344 unpin_user_page(pages[index]); in unpin_user_pages()
354 * has touched so far, we don't want to allocate unnecessary pages or in no_page_table()
448 * Only return device mapping pages in the FOLL_GET or FOLL_PIN in follow_page_pte()
459 /* Avoid special (like zero) pages in core dumps */ in follow_page_pte()
529 lru_add_drain(); /* push cached pages to LRU */ in follow_page_pte()
741 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
813 /* user gate pages are read-only */ in get_gate_page()
866 /* mlock all present pages, but do not fault in new pages */ in faultin_page()
936 * Anon pages in shared mappings are surprising: now in check_vma_flags()
962 * __get_user_pages() - pin user pages in memory
965 * @nr_pages: number of pages from start to pin
967 * @pages: array that receives pointers to the pages pinned.
969 * only intends to ensure the pages are faulted in.
974 * Returns either number of pages pinned (which may be less than the
978 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
979 * -- If nr_pages is >0, and some pages were pinned, returns the number of
980 * pages pinned. Again, this may be less than nr_pages.
983 * The caller is responsible for releasing returned @pages, via put_page().
1023 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1035 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1056 pages ? &pages[i] : NULL); in __get_user_pages()
1068 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1086 * If we have a pending SIGKILL, don't keep faulting pages and in __get_user_pages()
1122 if (pages) { in __get_user_pages()
1123 pages[i] = page; in __get_user_pages()
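The return convention documented above (a short count is not an error, -errno only if nothing was pinned, and references are dropped with put_page()) usually needs an explicit retry loop in callers. A hedged sketch using the caller-facing get_user_pages() that appears later in this listing, assuming the caller already holds mmap_read_lock() on current->mm; the helper name is illustrative:

#include <linux/mm.h>

/*
 * Illustrative: take references on a whole user range, retrying on short
 * returns. Every page must later be released with put_page().
 */
static long get_all_user_pages(unsigned long start, long nr_pages,
                               unsigned int gup_flags, struct page **pages)
{
        long got = 0;

        while (got < nr_pages) {
                long ret = get_user_pages(start + got * PAGE_SIZE,
                                          nr_pages - got, gup_flags,
                                          pages + got, NULL);
                if (ret < 0) {
                        while (got--)          /* undo on error */
                                put_page(pages[got]);
                        return ret;
                }
                got += ret;
        }
        return got;
}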
1250 struct page **pages, in __get_user_pages_locked() argument
1270 * is to set FOLL_GET if the caller wants pages[] filled in (but has in __get_user_pages_locked()
1274 * FOLL_PIN always expects pages to be non-null, but no need to assert in __get_user_pages_locked()
1277 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1283 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1312 * For the prefault case (!pages) we only update counts. in __get_user_pages_locked()
1314 if (likely(pages)) in __get_user_pages_locked()
1315 pages += ret; in __get_user_pages_locked()
1344 pages, NULL, locked); in __get_user_pages_locked()
1360 if (likely(pages)) in __get_user_pages_locked()
1361 pages++; in __get_user_pages_locked()
1376 * populate_vma_page_range() - populate a range of pages in the vma.
1382 * This takes care of mlocking the pages too if VM_LOCKED is set.
1384 * Return either number of pages pinned in the vma, or a negative error
1435 * __mm_populate - populate and/or mlock pages within a range of address space.
1453 * We want to fault in pages for [nstart; end) address range. in __mm_populate()
1474 * Now fault in a range of pages. populate_vma_page_range() in __mm_populate()
1475 * double checks the vma flags, so that it won't mlock pages in __mm_populate()
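populate_vma_page_range() and __mm_populate() are not called by drivers directly; from user space these paths are typically reached through mmap(2) with MAP_POPULATE or through mlock(2). A small user-space sketch (plain C, not kernel code) showing how they get exercised, on the assumption that those system calls route into this populate code as the comments above describe:

#include <sys/mman.h>

int main(void)
{
        size_t len = 16 * 4096;

        /* MAP_POPULATE prefaults the whole range up front. */
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        /* mlock() also populates the range and keeps it resident. */
        if (mlock(buf, len))
                return 1;

        munlock(buf, len);
        munmap(buf, len);
        return 0;
}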
1495 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1521 if (pages) { in __get_user_pages_locked()
1522 pages[i] = virt_to_page(start); in __get_user_pages_locked()
1523 if (pages[i]) in __get_user_pages_locked()
1524 get_page(pages[i]); in __get_user_pages_locked()
1594 struct page **pages, in check_and_migrate_cma_pages() argument
1612 struct page *head = compound_head(pages[i]); in check_and_migrate_cma_pages()
1618 step = compound_nr(head) - (pages[i] - head); in check_and_migrate_cma_pages()
1651 unpin_user_pages(pages, nr_pages); in check_and_migrate_cma_pages()
1654 put_page(pages[i]); in check_and_migrate_cma_pages()
1659 * some of the pages failed migration. Do get_user_pages in check_and_migrate_cma_pages()
1668 * We did migrate all the pages. Try to get the page references in check_and_migrate_cma_pages()
1669 * again migrating any new CMA pages which we failed to isolate in check_and_migrate_cma_pages()
1673 pages, vmas, NULL, in check_and_migrate_cma_pages()
1689 struct page **pages, in check_and_migrate_cma_pages() argument
1704 struct page **pages, in __gup_longterm_locked() argument
1713 if (!pages) in __gup_longterm_locked()
1726 rc = __get_user_pages_locked(mm, start, nr_pages, pages, in __gup_longterm_locked()
1735 unpin_user_pages(pages, rc); in __gup_longterm_locked()
1738 put_page(pages[i]); in __gup_longterm_locked()
1743 rc = check_and_migrate_cma_pages(mm, start, rc, pages, in __gup_longterm_locked()
1757 struct page **pages, in __gup_longterm_locked() argument
1761 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __gup_longterm_locked()
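__gup_longterm_locked() and check_and_migrate_cma_pages() implement the FOLL_LONGTERM rule: before a long-lived pin completes, any pages sitting in CMA areas are migrated out so the pin cannot block the CMA allocator indefinitely. A hedged caller-side sketch of a long-term pin (RDMA-style), assuming the pin_user_pages() signature from this listing and that the caller must hold mmap_read_lock(); names are illustrative:

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative: long-lived pin of a user buffer, e.g. for an RDMA region. */
static long longterm_pin_buffer(unsigned long uaddr, long npages,
                                struct page **pages)
{
        long pinned;

        mmap_read_lock(current->mm);
        pinned = pin_user_pages(uaddr, npages,
                                FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
        mmap_read_unlock(current->mm);

        return pinned;  /* released much later with unpin_user_pages() */
}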
1788 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
1805 return __gup_longterm_locked(mm, start, nr_pages, pages, in __get_user_pages_remote()
1810 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __get_user_pages_remote()
1816 * get_user_pages_remote() - pin user pages in memory
1819 * @nr_pages: number of pages from start to pin
1821 * @pages: array that receives pointers to the pages pinned.
1823 * only intends to ensure the pages are faulted in.
1830 * Returns either number of pages pinned (which may be less than the
1834 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1835 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1836 * pages pinned. Again, this may be less than nr_pages.
1838 * The caller is responsible for releasing returned @pages, via put_page().
1864 * via the user virtual addresses. The pages may be submitted for
1877 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1884 pages, vmas, locked); in get_user_pages_remote()
1891 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1899 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
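get_user_pages_remote() is the variant for acting on another process's address space: the caller holds the target mm's mmap_lock, and the @locked argument reports whether that lock is still held on return. A hedged sketch of reading one page of a remote mm, assuming the signatures shown in this listing and that the caller already holds a reference on @mm (for example from get_task_mm()); the helper name is illustrative:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative: copy one page out of another process's address space. */
static long read_remote_page(struct mm_struct *mm, unsigned long uaddr,
                             void *out)
{
        struct page *page;
        int locked = 1;
        long got;

        mmap_read_lock(mm);
        got = get_user_pages_remote(mm, uaddr & PAGE_MASK, 1, 0,
                                    &page, NULL, &locked);
        if (locked)
                mmap_read_unlock(mm);
        if (got != 1)
                return got < 0 ? got : -EFAULT;

        memcpy(out, kmap(page), PAGE_SIZE);
        kunmap(page);
        put_page(page);         /* plain GUP reference, not a FOLL_PIN */
        return 0;
}

pin_user_pages_remote(), documented further down in this listing, is the FOLL_PIN counterpart of the same calling convention; its pages are released with unpin_user_page() instead of put_page().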
1907 * get_user_pages() - pin user pages in memory
1909 * @nr_pages: number of pages from start to pin
1911 * @pages: array that receives pointers to the pages pinned.
1913 * only intends to ensure the pages are faulted in.
1923 unsigned int gup_flags, struct page **pages, in get_user_pages() argument
1930 pages, vmas, gup_flags | FOLL_TOUCH); in get_user_pages()
1939 * get_user_pages(mm, ..., pages, NULL);
1947 * get_user_pages_locked(mm, ..., pages, &locked);
1952 * @nr_pages: number of pages from start to pin
1954 * @pages: array that receives pointers to the pages pinned.
1956 * only intends to ensure the pages are faulted in.
1967 unsigned int gup_flags, struct page **pages, in get_user_pages_locked() argument
1986 pages, NULL, locked, in get_user_pages_locked()
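The usage comment above sketches the conversion to the *_locked convention. Filled out, and assuming the signature of get_user_pages_locked() shown in this listing (which operates on current->mm), it looks roughly like the following; the wrapper name is illustrative:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Illustrative: the "int locked = 1" convention from the comment above.
 * get_user_pages_locked() may drop mmap_lock while faulting; *locked
 * reports whether it is still held on return.
 */
static long gup_with_locked(unsigned long start, long nr_pages,
                            unsigned int gup_flags, struct page **pages)
{
        int locked = 1;
        long ret;

        mmap_read_lock(current->mm);
        ret = get_user_pages_locked(start, nr_pages, gup_flags,
                                    pages, &locked);
        if (locked)
                mmap_read_unlock(current->mm);
        return ret;
}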
1995 * get_user_pages(mm, ..., pages, NULL);
2000 * get_user_pages_unlocked(mm, ..., pages);
2007 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2023 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, in get_user_pages_unlocked()
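get_user_pages_unlocked() packages the same before/after pattern for callers that do not otherwise need mmap_lock: it takes and drops the lock itself. A minimal sketch (wrapper name illustrative):

#include <linux/mm.h>

/* Illustrative: simplest slow-path GUP; no mmap_lock handling needed. */
static long gup_simple(unsigned long start, long nr_pages,
                       struct page **pages, unsigned int gup_flags)
{
        return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
}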
2034 * get_user_pages_fast attempts to pin user pages by walking the page
2036 * protected from page table pages being freed from under it, and should
2041 * pages are freed. This is unsuitable for architectures that do not need
2044 * Another way to achieve this is to batch up page table containing pages
2046 * pages. Disabling interrupts will allow the fast_gup walker to both block
2054 * free pages containing page tables or TLB flushing requires IPI broadcast.
2146 struct page **pages) in undo_dev_pagemap() argument
2149 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2161 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
2188 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2222 pages[*nr] = page; in gup_pte_range()
2243 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2247 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
2256 struct page **pages, int *nr) in __gup_device_huge() argument
2266 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2270 pages[*nr] = page; in __gup_device_huge()
2272 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2286 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2292 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2296 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2304 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2310 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2314 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2322 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2330 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2338 unsigned long end, struct page **pages) in record_subpages() argument
2343 pages[nr++] = page++; in record_subpages()
2358 struct page **pages, int *nr) in gup_hugepte() argument
2379 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2397 struct page **pages, int *nr) in gup_huge_pd() argument
2406 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2415 struct page **pages, int *nr) in gup_huge_pd() argument
2423 struct page **pages, int *nr) in gup_huge_pmd() argument
2435 pages, nr); in gup_huge_pmd()
2439 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2457 struct page **pages, int *nr) in gup_huge_pud() argument
2469 pages, nr); in gup_huge_pud()
2473 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2491 struct page **pages, int *nr) in gup_huge_pgd() argument
2502 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2519 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
2543 pages, nr)) in gup_pmd_range()
2552 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
2554 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) in gup_pmd_range()
2562 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
2576 pages, nr)) in gup_pud_range()
2580 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
2582 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
2590 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
2605 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
2607 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
2615 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2629 pages, nr)) in gup_pgd_range()
2633 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
2635 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
2641 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2658 unsigned int gup_flags, struct page **pages) in __gup_longterm_unlocked() argument
2670 pages, NULL, gup_flags); in __gup_longterm_unlocked()
2674 pages, gup_flags); in __gup_longterm_unlocked()
2682 struct page **pages) in internal_get_user_pages_fast() argument
2713 * With interrupts disabled, we block page table pages from being in internal_get_user_pages_fast()
2724 gup_pgd_range(addr, end, fast_flags, pages, &nr_pinned); in internal_get_user_pages_fast()
2730 /* Try to get the remaining pages with get_user_pages */ in internal_get_user_pages_fast()
2732 pages += nr_pinned; in internal_get_user_pages_fast()
2735 gup_flags, pages); in internal_get_user_pages_fast()
2749 * get_user_pages_fast_only() - pin user pages in memory
2751 * @nr_pages: number of pages from start to pin
2753 * @pages: array that receives pointers to the pages pinned.
2759 * number of pages pinned, 0 if no pages were pinned.
2762 * pages pinned.
2769 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
2782 pages); in get_user_pages_fast_only()
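get_user_pages_fast_only() never takes mmap_lock and never falls back to the faulting slow path, which is why it returns 0 rather than -errno when it cannot pin: it is meant for contexts that must not sleep, with the caller retrying later from a sleepable context (for example via get_user_pages_unlocked()). A hedged sketch; the helper name is illustrative:

#include <linux/mm.h>

/*
 * Illustrative: opportunistic grab from a context that must not sleep.
 * Returns true and fills *page on success; the reference is dropped
 * later with put_page(). On failure the caller retries from a context
 * that is allowed to fault the page in.
 */
static bool try_get_page_atomic(unsigned long uaddr, bool write,
                                struct page **page)
{
        unsigned int flags = write ? FOLL_WRITE : 0;

        return get_user_pages_fast_only(uaddr, 1, flags, page) == 1;
}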
2798 * get_user_pages_fast() - pin user pages in memory
2800 * @nr_pages: number of pages from start to pin
2802 * @pages: array that receives pointers to the pages pinned.
2805 * Attempt to pin user pages in memory without taking mm->mmap_lock.
2809 * Returns number of pages pinned. This may be fewer than the number requested.
2810 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2814 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
2826 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
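get_user_pages_fast() is the common caller-facing entry point: no mmap_lock is needed, but it may still sleep when it falls back to the slow path, and it returns plain GUP references that are dropped with put_page(). A hedged sketch that treats a partial pin as a failure (names illustrative):

#include <linux/mm.h>

/* Illustrative: take references on a small user buffer for one I/O. */
static int grab_user_buffer(unsigned long uaddr, int npages,
                            struct page **pages)
{
        int got = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);

        if (got < 0)
                return got;
        if (got != npages) {
                while (got--)
                        put_page(pages[got]);
                return -EFAULT;
        }
        return 0;
}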
2831 * pin_user_pages_fast() - pin user pages in memory without taking locks
2834 * @nr_pages: number of pages from start to pin
2836 * @pages: array that receives pointers to the pages pinned.
2843 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2847 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
2854 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
2865 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast_only() argument
2881 pages); in pin_user_pages_fast_only()
2895 * pin_user_pages_remote() - pin pages of a remote process
2899 * @nr_pages: number of pages from start to pin
2901 * @pages: array that receives pointers to the pages pinned.
2903 * only intends to ensure the pages are faulted in.
2914 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2919 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
2928 pages, vmas, locked); in pin_user_pages_remote()
2933 * pin_user_pages() - pin user pages in memory for use by other devices
2936 * @nr_pages: number of pages from start to pin
2938 * @pages: array that receives pointers to the pages pinned.
2940 * only intends to ensure the pages are faulted in.
2947 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2951 unsigned int gup_flags, struct page **pages, in pin_user_pages() argument
2960 pages, vmas, gup_flags); in pin_user_pages()
2970 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
2977 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); in pin_user_pages_unlocked()
2987 unsigned int gup_flags, struct page **pages, in pin_user_pages_locked() argument
3005 pages, NULL, locked, in pin_user_pages_locked()