Lines Matching full:pages
84 * So now that the head page is stable, recheck that the pages still in try_get_compound_head()
115 * FOLL_PIN on compound pages that are > two pages long: page's refcount will
119 * FOLL_PIN on normal pages, or compound pages that are two pages long:
221 * Pages that were pinned via pin_user_pages*() must be released via either
223 * that such pages can be separately tracked and uniquely handled. In
285 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
286 * @pages: array of pages to be maybe marked dirty, and definitely released.
287 * @npages: number of pages in the @pages array.
288 * @make_dirty: whether to mark the pages dirty
293 * For each page in the @pages array, make that page (or its head page, if a
295 * listed as clean. In any case, releases all pages using unpin_user_page(),
306 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
314 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
318 for_each_compound_head(index, pages, npages, head, ntails) { in unpin_user_pages_dirty_lock()
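The dirty-then-release sequence documented above is the usual tail end of a Direct IO read into user pages. A minimal caller-side sketch, assuming a hypothetical driver completion path (my_read_complete() and its arguments are illustrative, not from this file):

        #include <linux/mm.h>

        /* Release pages a device has just DMA'd into. Pages pinned with
         * pin_user_pages(..., FOLL_WRITE, ...) must be marked dirty on
         * release, or writeback could lose the device's data.
         */
        static void my_read_complete(struct page **pages,
                                     unsigned long npages, bool success)
        {
                /* Dirties each (head) page under the page lock, then unpins. */
                unpin_user_pages_dirty_lock(pages, npages, success);
        }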
351 * @npages: number of consecutive pages to release.
352 * @make_dirty: whether to mark the pages dirty
354 * "gup-pinned page range" refers to a range of pages that has had one of the
358 * its head page, if a compound page) dirty, if @make_dirty is true, and if the
383 * unpin_user_pages() - release an array of gup-pinned pages.
384 * @pages: array of pages to be released.
385 * @npages: number of pages in the @pages array.
387 * For each page in the @pages array, release the page using unpin_user_page().
391 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
398 * If this WARN_ON() fires, then the system *might* be leaking pages (by in unpin_user_pages()
405 for_each_compound_head(index, pages, npages, head, ntails) in unpin_user_pages()
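When nothing was written through the pins, the plain bulk release above is enough; a short sketch of a typical error path (my_abort() is a hypothetical name):

        #include <linux/mm.h>

        /* Drop every pin without dirtying anything. */
        static void my_abort(struct page **pages, long pinned)
        {
                if (pinned > 0)
                        unpin_user_pages(pages, pinned);
        }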
427 * has touched so far, we don't want to allocate unnecessary pages or in no_page_table()
521 * Only return device mapping pages in the FOLL_GET or FOLL_PIN in follow_page_pte()
532 /* Avoid special (like zero) pages in core dumps */ in follow_page_pte()
590 lru_add_drain(); /* push cached pages to LRU */ in follow_page_pte()
790 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
865 /* user gate pages are read-only */ in get_gate_page()
918 /* mlock all present pages, but do not fault in new pages */ in faultin_page()
994 * Anon pages in shared mappings are surprising: now in check_vma_flags()
1020 * __get_user_pages() - pin user pages in memory
1023 * @nr_pages: number of pages from start to pin
1025 * @pages: array that receives pointers to the pages pinned.
1027 * only intends to ensure the pages are faulted in.
1032 * Returns either number of pages pinned (which may be less than the
1036 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1037 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1038 * pages pinned. Again, this may be less than nr_pages.
1041 * The caller is responsible for releasing returned @pages, via put_page().
1081 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1093 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1114 pages ? &pages[i] : NULL); in __get_user_pages()
1130 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1147 * If we have a pending SIGKILL, don't keep faulting pages and in __get_user_pages()
1183 if (pages) { in __get_user_pages()
1184 pages[i] = page; in __get_user_pages()
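Since a positive return may be smaller than nr_pages, callers that need the whole range loop over the shortfall. A sketch of that convention using get_user_pages(), which shares the same return rules (the helper name is illustrative; the caller is assumed to hold mmap_lock):

        #include <linux/mm.h>
        #include <linux/errno.h>

        static long pin_whole_range(unsigned long start, unsigned long nr_pages,
                                    struct page **pages)
        {
                long pinned = 0;

                while (pinned < nr_pages) {
                        long rc = get_user_pages(start + pinned * PAGE_SIZE,
                                                 nr_pages - pinned, FOLL_WRITE,
                                                 pages + pinned, NULL);
                        if (rc <= 0) {
                                /* Per the rule above, release via put_page(). */
                                while (pinned)
                                        put_page(pages[--pinned]);
                                return rc < 0 ? rc : -EFAULT;
                        }
                        pinned += rc;
                }
                return pinned;
        }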
1310 struct page **pages, in __get_user_pages_locked() argument
1330 * is to set FOLL_GET if the caller wants pages[] filled in (but has in __get_user_pages_locked()
1334 * FOLL_PIN always expects pages to be non-null, but no need to assert in __get_user_pages_locked()
1337 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1343 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1372 * For the prefault case (!pages) we only update counts. in __get_user_pages_locked()
1374 if (likely(pages)) in __get_user_pages_locked()
1375 pages += ret; in __get_user_pages_locked()
1404 pages, NULL, locked); in __get_user_pages_locked()
1420 if (likely(pages)) in __get_user_pages_locked()
1421 pages++; in __get_user_pages_locked()
1436 * populate_vma_page_range() - populate a range of pages in the vma.
1442 * This takes care of mlocking the pages too if VM_LOCKED is set.
1444 * Return either number of pages pinned in the vma, or a negative error
1498 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1506 * Returns either number of processed pages in the vma, or a negative error
1556 * __mm_populate - populate and/or mlock pages within a range of address space.
1574 * We want to fault in pages for [nstart, end) address range. in __mm_populate()
1595 * Now fault in a range of pages. populate_vma_page_range() in __mm_populate()
1596 * double checks the vma flags, so that it won't mlock pages in __mm_populate()
1616 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1642 if (pages) { in __get_user_pages_locked()
1643 pages[i] = virt_to_page(start); in __get_user_pages_locked()
1644 if (pages[i]) in __get_user_pages_locked()
1645 get_page(pages[i]); in __get_user_pages_locked()
1693 * Check whether all pages are pinnable; if so, return the number of pages. If some
1694 * pages are not pinnable, migrate them, and unpin all pages. Return zero if
1695 * pages were migrated, or if some pages were not successfully isolated.
1699 struct page **pages, in check_and_migrate_movable_pages() argument
1715 head = compound_head(pages[i]); in check_and_migrate_movable_pages()
1747 * If list is empty, and no isolation errors, means that all pages are in check_and_migrate_movable_pages()
1754 unpin_user_pages(pages, nr_pages); in check_and_migrate_movable_pages()
1757 put_page(pages[i]); in check_and_migrate_movable_pages()
1771 struct page **pages, in check_and_migrate_movable_pages() argument
1785 struct page **pages, in __gup_longterm_locked() argument
1793 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __gup_longterm_locked()
1797 rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __gup_longterm_locked()
1801 rc = check_and_migrate_movable_pages(rc, pages, gup_flags); in __gup_longterm_locked()
1830 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
1847 return __gup_longterm_locked(mm, start, nr_pages, pages, in __get_user_pages_remote()
1852 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __get_user_pages_remote()
1858 * get_user_pages_remote() - pin user pages in memory
1861 * @nr_pages: number of pages from start to pin
1863 * @pages: array that receives pointers to the pages pinned.
1865 * only intends to ensure the pages are faulted in.
1872 * Returns either number of pages pinned (which may be less than the
1876 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1877 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1878 * pages pinned. Again, this may be less than nr_pages.
1880 * The caller is responsible for releasing returned @pages, via put_page().
1906 * via the user virtual addresses. The pages may be submitted for
1919 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1926 pages, vmas, locked); in get_user_pages_remote()
1933 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1941 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
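A caller-side sketch of the remote variant, e.g. a debugger-style read of one page from another task's address space (grab_remote_page() is a hypothetical name; the caller is assumed to already hold a reference on @mm, e.g. from get_task_mm()):

        #include <linux/mm.h>

        static struct page *grab_remote_page(struct mm_struct *mm,
                                             unsigned long addr)
        {
                struct page *page;
                int locked = 1;
                long rc;

                mmap_read_lock(mm);
                rc = get_user_pages_remote(mm, addr & PAGE_MASK, 1, 0,
                                           &page, NULL, &locked);
                /* GUP may have dropped mmap_lock; only unlock if still held. */
                if (locked)
                        mmap_read_unlock(mm);
                return rc == 1 ? page : NULL;   /* release with put_page() */
        }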
1949 * get_user_pages() - pin user pages in memory
1951 * @nr_pages: number of pages from start to pin
1953 * @pages: array that receives pointers to the pages pinned.
1955 * only intends to ensure the pages are faulted in.
1965 unsigned int gup_flags, struct page **pages, in get_user_pages() argument
1972 pages, vmas, gup_flags | FOLL_TOUCH); in get_user_pages()
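A minimal use of the FOLL_GET flavour, mainly to contrast the release rule with the pin_user_pages*() family below: references taken here go back via put_page(), never unpin_user_page() (touch_one_page() is an illustrative name):

        #include <linux/mm.h>
        #include <linux/sched.h>

        static int touch_one_page(unsigned long addr)
        {
                struct page *page;
                long rc;

                mmap_read_lock(current->mm);
                rc = get_user_pages(addr & PAGE_MASK, 1, FOLL_WRITE,
                                    &page, NULL);
                mmap_read_unlock(current->mm);
                if (rc != 1)
                        return rc < 0 ? rc : -EFAULT;
                put_page(page);
                return 0;
        }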
1980 * @nr_pages: number of pages from start to pin
1982 * @pages: array that receives pointers to the pages pinned.
1984 * only intends to ensure the pages are faulted in.
1993 * get_user_pages(start, ..., pages, NULL);
2001 * get_user_pages_locked(start, ..., pages, &locked);
2011 unsigned int gup_flags, struct page **pages, in get_user_pages_locked() argument
2030 pages, NULL, locked, in get_user_pages_locked()
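Spelled out, the converted pattern from the comment above looks roughly like this (an illustrative sketch, not a drop-in):

        #include <linux/mm.h>
        #include <linux/sched.h>

        static long gup_locked_example(unsigned long start,
                                       unsigned long nr_pages,
                                       struct page **pages)
        {
                int locked = 1;
                long rc;

                mmap_read_lock(current->mm);
                rc = get_user_pages_locked(start, nr_pages, FOLL_WRITE,
                                           pages, &locked);
                /* If GUP dropped mmap_lock to fault, @locked is now 0. */
                if (locked)
                        mmap_read_unlock(current->mm);
                return rc;
        }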
2039 * get_user_pages(start, ..., pages, NULL);
2044 * get_user_pages_unlocked(start, ..., pages);
2051 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2067 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, in get_user_pages_unlocked()
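The unlocked variant manages mmap_lock entirely by itself; note that its argument order differs (pages before gup_flags). A one-line sketch:

        #include <linux/mm.h>

        static long gup_unlocked_example(unsigned long start,
                                         unsigned long nr_pages,
                                         struct page **pages)
        {
                return get_user_pages_unlocked(start, nr_pages, pages,
                                               FOLL_WRITE);
        }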
2078 * get_user_pages_fast attempts to pin user pages by walking the page
2080 * protected from page table pages being freed from under it, and should
2085 * pages are freed. This is unsuitable for architectures that do not need
2088 * Another way to achieve this is to batch up page table containing pages
2090 * pages. Disabling interrupts will allow the fast_gup walker to both block
2098 * free pages containing page tables or TLB flushing requires IPI broadcast.
2112 struct page **pages) in undo_dev_pagemap() argument
2115 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2127 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
2154 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2193 pages[*nr] = page; in gup_pte_range()
2214 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2218 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
2227 struct page **pages, int *nr) in __gup_device_huge() argument
2238 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2243 pages[*nr] = page; in __gup_device_huge()
2245 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2259 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2265 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2269 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2277 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2283 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2287 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2295 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2303 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2311 unsigned long end, struct page **pages) in record_subpages() argument
2316 pages[nr++] = page++; in record_subpages()
2331 struct page **pages, int *nr) in gup_hugepte() argument
2352 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2370 struct page **pages, int *nr) in gup_huge_pd() argument
2379 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2388 struct page **pages, int *nr) in gup_huge_pd() argument
2396 struct page **pages, int *nr) in gup_huge_pmd() argument
2408 pages, nr); in gup_huge_pmd()
2412 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2430 struct page **pages, int *nr) in gup_huge_pud() argument
2442 pages, nr); in gup_huge_pud()
2446 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2464 struct page **pages, int *nr) in gup_huge_pgd() argument
2475 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2492 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
2516 pages, nr)) in gup_pmd_range()
2525 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
2527 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) in gup_pmd_range()
2535 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
2549 pages, nr)) in gup_pud_range()
2553 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
2555 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
2563 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
2578 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
2580 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
2588 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2602 pages, nr)) in gup_pgd_range()
2606 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
2608 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
2614 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2631 unsigned int gup_flags, struct page **pages) in __gup_longterm_unlocked() argument
2643 pages, NULL, gup_flags); in __gup_longterm_unlocked()
2647 pages, gup_flags); in __gup_longterm_unlocked()
2656 struct page **pages) in lockless_pages_from_mm() argument
2676 * With interrupts disabled, we block page table pages from being freed in lockless_pages_from_mm()
2684 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); in lockless_pages_from_mm()
2688 * When pinning pages for DMA there could be a concurrent write protect in lockless_pages_from_mm()
2693 unpin_user_pages(pages, nr_pinned); in lockless_pages_from_mm()
2703 struct page **pages) in internal_get_user_pages_fast() argument
2727 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); in internal_get_user_pages_fast()
2731 /* Slow path: try to get the remaining pages with get_user_pages */ in internal_get_user_pages_fast()
2733 pages += nr_pinned; in internal_get_user_pages_fast()
2735 pages); in internal_get_user_pages_fast()
2738 * The caller has to unpin the pages we already pinned so in internal_get_user_pages_fast()
2749 * get_user_pages_fast_only() - pin user pages in memory
2751 * @nr_pages: number of pages from start to pin
2753 * @pages: array that receives pointers to the pages pinned.
2759 * number of pages pinned, 0 if no pages were pinned.
2762 * pages pinned.
2769 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
2782 pages); in get_user_pages_fast_only()
2798 * get_user_pages_fast() - pin user pages in memory
2800 * @nr_pages: number of pages from start to pin
2802 * @pages: array that receives pointers to the pages pinned.
2805 * Attempt to pin user pages in memory without taking mm->mmap_lock.
2809 * Returns number of pages pinned. This may be fewer than the number requested.
2810 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2814 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
2826 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
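A typical fast-GUP call with rollback on partial pins (pin_buffer_fast() is a hypothetical helper; the caller takes no mmap_lock, and the fast path falls back to the slow path internally):

        #include <linux/mm.h>
        #include <linux/errno.h>

        static int pin_buffer_fast(unsigned long uaddr, int nr,
                                   struct page **pages)
        {
                int rc = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

                if (rc != nr) {
                        while (rc > 0)          /* roll back a partial pin */
                                put_page(pages[--rc]);
                        return rc < 0 ? rc : -EFAULT;
                }
                return 0;
        }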
2831 * pin_user_pages_fast() - pin user pages in memory without taking locks
2834 * @nr_pages: number of pages from start to pin
2836 * @pages: array that receives pointers to the pages pinned.
2843 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2847 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
2854 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
2865 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast_only() argument
2881 pages); in pin_user_pages_fast_only()
2895 * pin_user_pages_remote() - pin pages of a remote process
2899 * @nr_pages: number of pages from start to pin
2901 * @pages: array that receives pointers to the pages pinned.
2903 * only intends to ensure the pages are faulted in.
2914 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2919 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
2928 pages, vmas, locked); in pin_user_pages_remote()
2933 * pin_user_pages() - pin user pages in memory for use by other devices
2936 * @nr_pages: number of pages from start to pin
2938 * @pages: array that receives pointers to the pages pinned.
2940 * only intends to ensure the pages are faulted in.
2947 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2951 unsigned int gup_flags, struct page **pages, in pin_user_pages() argument
2960 pages, vmas, gup_flags); in pin_user_pages()
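A sketch of the long-term pin case, e.g. registering a user buffer for device DMA (longterm_pin() is an illustrative name; FOLL_LONGTERM routes through the migration logic in check_and_migrate_movable_pages() above):

        #include <linux/mm.h>
        #include <linux/sched.h>

        static long longterm_pin(unsigned long start, unsigned long nr_pages,
                                 struct page **pages)
        {
                long rc;

                mmap_read_lock(current->mm);
                rc = pin_user_pages(start, nr_pages,
                                    FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
                mmap_read_unlock(current->mm);
                return rc;      /* balance with unpin_user_pages(pages, rc) */
        }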
2970 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
2977 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); in pin_user_pages_unlocked()
2987 unsigned int gup_flags, struct page **pages, in pin_user_pages_locked() argument
3005 pages, NULL, locked, in pin_user_pages_locked()