Lines matching full:pages in mm/gup.c
32 static inline void sanity_check_pinned_pages(struct page **pages, in sanity_check_pinned_pages() argument
39 * We only pin anonymous pages if they are exclusive. Once pinned, we in sanity_check_pinned_pages()
43 * We'd like to verify that our pinned anonymous pages are still mapped in sanity_check_pinned_pages()
50 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
51 struct page *page = *pages; in sanity_check_pinned_pages()
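The lines above come from the CONFIG_DEBUG_VM check run whenever pinned pages are released: anonymous pages are only pinned while exclusive, so every pin must still be backed by an exclusive mapping. A minimal sketch of that invariant follows, assuming the folio helpers used elsewhere in this file; it is an approximation, not a verbatim copy of the upstream function.

#include <linux/mm.h>
#include <linux/mmdebug.h>

static void sanity_check_pinned_pages_sketch(struct page **pages,
                                             unsigned long npages)
{
        if (!IS_ENABLED(CONFIG_DEBUG_VM))
                return;

        for (; npages; npages--, pages++) {
                struct page *page = *pages;
                struct folio *folio = page_folio(page);

                /* Only anonymous pages carry the "exclusive" marker. */
                if (!folio_test_anon(folio))
                        continue;

                /*
                 * For a small folio the page itself must still be mapped
                 * exclusively; for a large (THP) folio either the head page
                 * (PMD-mapped) or the given subpage (PTE-mapped) must be.
                 */
                if (!folio_test_large(folio))
                        VM_BUG_ON_PAGE(!PageAnonExclusive(page), page);
                else
                        VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
                                       !PageAnonExclusive(page), page);
        }
}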
242 * Pages that were pinned via pin_user_pages*() must be released via either
244 * that such pages can be separately tracked and uniquely handled. In
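The comment above states the pairing rule for FOLL_PIN: pages obtained from any pin_user_pages*() variant must be released with unpin_user_page() (or one of its bulk wrappers), never put_page(), so that pinned pages stay separately tracked. A hedged usage sketch; the helper name is hypothetical:

#include <linux/mm.h>

/* Pin one user page for write, use it, release it. */
static int demo_pin_one_page(unsigned long uaddr)
{
        struct page *page;
        int ret;

        ret = pin_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* ... access the page, e.g. via kmap_local_page() ... */

        unpin_user_page(page);          /* not put_page() */
        return 0;
}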
285 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
286 * @pages: array of pages to be maybe marked dirty, and definitely released.
287 * @npages: number of pages in the @pages array.
288 * @make_dirty: whether to mark the pages dirty
293 * For each page in the @pages array, make that page (or its head page, if a
295 * listed as clean. In any case, releases all pages using unpin_user_page(),
306 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
314 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
318 sanity_check_pinned_pages(pages, npages); in unpin_user_pages_dirty_lock()
320 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_dirty_lock()
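A hedged sketch of the call pattern this function exists for: a driver pins user pages that a device then writes into, and afterwards dirties and unpins the whole array in one call. The helper name and flow are illustrative, not taken from this file:

#include <linux/mm.h>

static int demo_receive_into_user_buffer(unsigned long uaddr, int npages,
                                         struct page **pages)
{
        int pinned;

        pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
        if (pinned != npages) {
                if (pinned > 0)
                        unpin_user_pages(pages, pinned);
                return pinned < 0 ? pinned : -EFAULT;
        }

        /* ... device DMA writes into the pinned pages here ... */

        /* Mark every page dirty (taking the page lock) and unpin it. */
        unpin_user_pages_dirty_lock(pages, npages, true);
        return 0;
}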
356 * @npages: number of consecutive pages to release.
357 * @make_dirty: whether to mark the pages dirty
359 * "gup-pinned page range" refers to a range of pages that has had one of the
363 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
391 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages) in unpin_user_pages_lockless() argument
399 * fork() and some anonymous pages might now actually be shared -- in unpin_user_pages_lockless()
403 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages_lockless()
409 * unpin_user_pages() - release an array of gup-pinned pages.
410 * @pages: array of pages to be marked dirty and released.
411 * @npages: number of pages in the @pages array.
413 * For each page in the @pages array, release the page using unpin_user_page().
417 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
424 * If this WARN_ON() fires, then the system *might* be leaking pages (by in unpin_user_pages()
431 sanity_check_pinned_pages(pages, npages); in unpin_user_pages()
433 folio = gup_folio_next(pages, npages, i, &nr); in unpin_user_pages()
456 * has touched so far, we don't want to allocate unnecessary pages or in no_page_table()
582 * We only care about anon pages in can_follow_write_pte() and don't in follow_page_pte()
593 * Only return device mapping pages in the FOLL_GET or FOLL_PIN in follow_page_pte()
604 /* Avoid special (like zero) pages in core dumps */ in follow_page_pte()
849 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
932 /* user gate pages are read-only */ in get_gate_page()
1074 * Anon pages in shared mappings are surprising: now in check_vma_flags()
1100 * __get_user_pages() - pin user pages in memory
1103 * @nr_pages: number of pages from start to pin
1105 * @pages: array that receives pointers to the pages pinned.
1107 * only intends to ensure the pages are faulted in.
1112 * Returns either number of pages pinned (which may be less than the
1116 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1117 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1118 * pages pinned. Again, this may be less than nr_pages.
1121 * The caller is responsible for releasing returned @pages, via put_page().
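The return-value rules above are shared by the whole get_user_pages*() family. A hedged sketch of a caller that wants all-or-nothing semantics and backs out a partial pin with put_page(); it assumes the caller-held mmap_lock convention that get_user_pages() uses at this point in the tree, and the helper name is hypothetical:

#include <linux/mm.h>
#include <linux/sched.h>

static long demo_get_all_or_nothing(unsigned long start, unsigned long nr_pages,
                                    struct page **pages)
{
        long got, i;

        mmap_read_lock(current->mm);
        got = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
        mmap_read_unlock(current->mm);

        if (got < 0)
                return got;                     /* nothing was pinned */

        if (got < nr_pages) {                   /* partial success: back out */
                for (i = 0; i < got; i++)
                        put_page(pages[i]);
                return -EFAULT;
        }
        return got;
}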
1161 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1173 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1186 pages ? &pages[i] : NULL); in __get_user_pages()
1202 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1219 * If we have a pending SIGKILL, don't keep faulting pages and in __get_user_pages()
1248 * struct page. If the caller expects **pages to be in __get_user_pages()
1252 if (pages) { in __get_user_pages()
1262 if (pages) { in __get_user_pages()
1263 pages[i] = page; in __get_user_pages()
1401 struct page **pages, in __get_user_pages_locked() argument
1421 * is to set FOLL_GET if the caller wants pages[] filled in (but has in __get_user_pages_locked()
1425 * FOLL_PIN always expects pages to be non-null, but no need to assert in __get_user_pages_locked()
1428 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1434 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1463 * For the prefault case (!pages) we only update counts. in __get_user_pages_locked()
1465 if (likely(pages)) in __get_user_pages_locked()
1466 pages += ret; in __get_user_pages_locked()
1495 pages, NULL, locked); in __get_user_pages_locked()
1511 if (likely(pages)) in __get_user_pages_locked()
1512 pages++; in __get_user_pages_locked()
1527 * populate_vma_page_range() - populate a range of pages in the vma.
1533 * This takes care of mlocking the pages too if VM_LOCKED is set.
1535 * Return either number of pages pinned in the vma, or a negative error
1597 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1605 * Returns either number of processed pages in the vma, or a negative error
1657 * __mm_populate - populate and/or mlock pages within a range of address space.
1675 * We want to fault in pages for [nstart; end) address range. in __mm_populate()
1697 * Now fault in a range of pages. populate_vma_page_range() in __mm_populate()
1698 * double checks the vma flags, so that it won't mlock pages in __mm_populate()
1718 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1744 if (pages) { in __get_user_pages_locked()
1745 pages[i] = virt_to_page((void *)start); in __get_user_pages_locked()
1746 if (pages[i]) in __get_user_pages_locked()
1747 get_page(pages[i]); in __get_user_pages_locked()
1832 * already know that some or all of the pages in the address range aren't in
1837 * Note that we don't pin or otherwise hold the pages referenced that we fault
1942 * Returns the number of collected pages. Return value is always >= 0.
1947 struct page **pages) in collect_longterm_unpinnable_pages() argument
1954 struct folio *folio = page_folio(pages[i]); in collect_longterm_unpinnable_pages()
1991 * Unpins all pages and migrates device coherent pages and movable_page_list.
1992 * Returns -EAGAIN if all pages were successfully migrated or -errno for failure
1998 struct page **pages) in migrate_longterm_unpinnable_pages() argument
2004 struct folio *folio = page_folio(pages[i]); in migrate_longterm_unpinnable_pages()
2011 pages[i] = NULL; in migrate_longterm_unpinnable_pages()
2024 * We can't migrate pages with unexpected references, so drop in migrate_longterm_unpinnable_pages()
2026 * Migrating pages have been added to movable_page_list after in migrate_longterm_unpinnable_pages()
2030 unpin_user_page(pages[i]); in migrate_longterm_unpinnable_pages()
2031 pages[i] = NULL; in migrate_longterm_unpinnable_pages()
2054 if (pages[i]) in migrate_longterm_unpinnable_pages()
2055 unpin_user_page(pages[i]); in migrate_longterm_unpinnable_pages()
2062 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
2063 * pages in the range are required to be pinned via FOLL_PIN, before calling
2066 * If any pages in the range are not allowed to be pinned, then this routine
2067 * will migrate those pages away, unpin all the pages in the range and return
2074 * If everything is OK and all pages in the range are allowed to be pinned, then
2075 * this routine leaves all pages pinned and returns zero for success.
2078 struct page **pages) in check_and_migrate_movable_pages() argument
2084 nr_pages, pages); in check_and_migrate_movable_pages()
2089 pages); in check_and_migrate_movable_pages()
2093 struct page **pages) in check_and_migrate_movable_pages() argument
2106 struct page **pages, in __gup_longterm_locked() argument
2114 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __gup_longterm_locked()
2121 * which assumes pages have been pinned via FOLL_PIN. in __gup_longterm_locked()
2130 pages, vmas, NULL, in __gup_longterm_locked()
2136 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); in __gup_longterm_locked()
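The collect/migrate helpers above are what make FOLL_LONGTERM safe: pages that must not be pinned long term are migrated away and the pin is retried, all behind the flag. A hedged caller-side sketch, with an illustrative helper name:

#include <linux/mm.h>

static int demo_longterm_pin(unsigned long uaddr, int npages,
                             struct page **pages)
{
        /*
         * FOLL_LONGTERM asks GUP to migrate any unpinnable pages (CMA,
         * ZONE_MOVABLE, device-coherent) out of the range and retry, so
         * the resulting pin may be held indefinitely, e.g. for RDMA.
         * Release with unpin_user_pages() when the mapping goes away.
         */
        return pin_user_pages_fast(uaddr, npages,
                                   FOLL_WRITE | FOLL_LONGTERM, pages);
}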
2165 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
2182 return __gup_longterm_locked(mm, start, nr_pages, pages, in __get_user_pages_remote()
2187 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __get_user_pages_remote()
2193 * get_user_pages_remote() - pin user pages in memory
2196 * @nr_pages: number of pages from start to pin
2198 * @pages: array that receives pointers to the pages pinned.
2200 * only intends to ensure the pages are faulted in.
2207 * Returns either number of pages pinned (which may be less than the
2211 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2212 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2213 * pages pinned. Again, this may be less than nr_pages.
2215 * The caller is responsible for releasing returned @pages, via put_page().
2241 * via the user virtual addresses. The pages may be submitted for
2254 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2261 pages, vmas, locked); in get_user_pages_remote()
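A hedged sketch of pinning one page of another process's address space, roughly the pattern used by callers that operate on a foreign mm. It assumes the caller already holds a reference on the mm (for example from get_task_mm()); the helper name is hypothetical:

#include <linux/mm.h>

static struct page *demo_grab_remote_page(struct mm_struct *mm,
                                          unsigned long uaddr)
{
        struct page *page = NULL;
        int locked = 1;
        long ret;

        mmap_read_lock(mm);
        ret = get_user_pages_remote(mm, uaddr, 1, FOLL_WRITE, &page,
                                    NULL, &locked);
        if (locked)
                mmap_read_unlock(mm);   /* GUP may have dropped the lock */

        return ret == 1 ? page : NULL;  /* release with put_page() */
}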
2268 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
2276 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
2284 * get_user_pages() - pin user pages in memory
2286 * @nr_pages: number of pages from start to pin
2288 * @pages: array that receives pointers to the pages pinned.
2290 * only intends to ensure the pages are faulted in.
2300 unsigned int gup_flags, struct page **pages, in get_user_pages() argument
2307 pages, vmas, gup_flags | FOLL_TOUCH); in get_user_pages()
2315 * get_user_pages(mm, ..., pages, NULL);
2320 * get_user_pages_unlocked(mm, ..., pages);
2327 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2343 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, in get_user_pages_unlocked()
2354 * get_user_pages_fast attempts to pin user pages by walking the page
2356 * protected from page table pages being freed from under it, and should
2361 * pages are freed. This is unsuitable for architectures that do not need
2364 * Another way to achieve this is to batch up page table containing pages
2366 * pages. Disabling interrupts will allow the fast_gup walker to both block
2374 * free pages containing page tables or TLB flushing requires IPI broadcast.
2388 struct page **pages) in undo_dev_pagemap() argument
2391 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2423 struct page **pages, int *nr) in gup_pte_range() argument
2447 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2490 pages[*nr] = page; in gup_pte_range()
2510 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2515 struct page **pages, int *nr) in gup_pte_range() argument
2524 struct page **pages, int *nr) in __gup_device_huge() argument
2534 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2538 pages[*nr] = page; in __gup_device_huge()
2540 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2553 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2559 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2563 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2571 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2577 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2581 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2589 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2597 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2605 unsigned long end, struct page **pages) in record_subpages() argument
2610 pages[nr] = nth_page(page, nr); in record_subpages()
2625 struct page **pages, int *nr) in gup_hugepte() argument
2646 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2669 struct page **pages, int *nr) in gup_huge_pd() argument
2678 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2687 struct page **pages, int *nr) in gup_huge_pd() argument
2695 struct page **pages, int *nr) in gup_huge_pmd() argument
2708 pages, nr); in gup_huge_pmd()
2712 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2735 struct page **pages, int *nr) in gup_huge_pud() argument
2748 pages, nr); in gup_huge_pud()
2752 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2775 struct page **pages, int *nr) in gup_huge_pgd() argument
2787 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2804 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
2824 pages, nr)) in gup_pmd_range()
2833 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
2835 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) in gup_pmd_range()
2843 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
2857 pages, nr)) in gup_pud_range()
2861 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
2863 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
2871 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
2886 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
2888 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
2896 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2910 pages, nr)) in gup_pgd_range()
2914 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
2916 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
2922 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2939 unsigned int gup_flags, struct page **pages) in __gup_longterm_unlocked() argument
2951 pages, NULL, gup_flags); in __gup_longterm_unlocked()
2955 pages, gup_flags); in __gup_longterm_unlocked()
2964 struct page **pages) in lockless_pages_from_mm() argument
2984 * With interrupts disabled, we block page table pages from being freed in lockless_pages_from_mm()
2992 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); in lockless_pages_from_mm()
2996 * When pinning pages for DMA there could be a concurrent write protect in lockless_pages_from_mm()
3001 unpin_user_pages_lockless(pages, nr_pinned); in lockless_pages_from_mm()
3004 sanity_check_pinned_pages(pages, nr_pinned); in lockless_pages_from_mm()
3013 struct page **pages) in internal_get_user_pages_fast() argument
3037 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); in internal_get_user_pages_fast()
3041 /* Slow path: try to get the remaining pages with get_user_pages */ in internal_get_user_pages_fast()
3043 pages += nr_pinned; in internal_get_user_pages_fast()
3045 pages); in internal_get_user_pages_fast()
3048 * The caller has to unpin the pages we already pinned so in internal_get_user_pages_fast()
3059 * get_user_pages_fast_only() - pin user pages in memory
3061 * @nr_pages: number of pages from start to pin
3063 * @pages: array that receives pointers to the pages pinned.
3069 * number of pages pinned, 0 if no pages were pinned.
3072 * pages pinned.
3079 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
3092 pages); in get_user_pages_fast_only()
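Unlike the other variants, this one never falls back to the slow path, so it can be used where sleeping or taking mmap_lock is not acceptable. A hedged sketch of that opportunistic use; the helper name is hypothetical and the caller is expected to retry from a sleepable context on failure:

#include <linux/mm.h>

static int demo_try_fast_pin(unsigned long uaddr, struct page **page)
{
        /* Never sleeps and never takes mmap_lock; may simply fail. */
        if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, page) == 1)
                return 0;

        /* Retry with get_user_pages_fast() from a context that may sleep. */
        return -EAGAIN;
}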
3108 * get_user_pages_fast() - pin user pages in memory
3110 * @nr_pages: number of pages from start to pin
3112 * @pages: array that receives pointers to the pages pinned.
3115 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3119 * Returns number of pages pinned. This may be fewer than the number requested.
3120 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3124 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
3136 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
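A hedged sketch of the common pattern: pin via the fast path (the caller never takes mmap_lock), peek at the data through a temporary kernel mapping, then drop the reference with put_page(), since get_user_pages_fast() is a FOLL_GET-style API. The helper name is hypothetical:

#include <linux/highmem.h>
#include <linux/mm.h>

static int demo_peek_user_byte(unsigned long uaddr, u8 *out)
{
        struct page *page;
        void *kaddr;

        if (get_user_pages_fast(uaddr, 1, 0, &page) != 1)
                return -EFAULT;

        kaddr = kmap_local_page(page);
        *out = *((u8 *)kaddr + offset_in_page(uaddr));
        kunmap_local(kaddr);

        put_page(page);         /* FOLL_GET reference, not a FOLL_PIN */
        return 0;
}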
3141 * pin_user_pages_fast() - pin user pages in memory without taking locks
3144 * @nr_pages: number of pages from start to pin
3146 * @pages: array that receives pointers to the pages pinned.
3153 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3157 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
3163 if (WARN_ON_ONCE(!pages)) in pin_user_pages_fast()
3167 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
3178 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast_only() argument
3189 if (WARN_ON_ONCE(!pages)) in pin_user_pages_fast_only()
3197 pages); in pin_user_pages_fast_only()
3211 * pin_user_pages_remote() - pin pages of a remote process
3215 * @nr_pages: number of pages from start to pin
3217 * @pages: array that receives pointers to the pages pinned.
3229 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3234 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
3241 if (WARN_ON_ONCE(!pages)) in pin_user_pages_remote()
3246 pages, vmas, locked); in pin_user_pages_remote()
3251 * pin_user_pages() - pin user pages in memory for use by other devices
3254 * @nr_pages: number of pages from start to pin
3256 * @pages: array that receives pointers to the pages pinned.
3264 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3268 unsigned int gup_flags, struct page **pages, in pin_user_pages() argument
3275 if (WARN_ON_ONCE(!pages)) in pin_user_pages()
3280 pages, vmas, gup_flags); in pin_user_pages()
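A hedged sketch using the non-fast variant; it assumes the caller-held mmap_lock convention that pin_user_pages() shares with get_user_pages() in this version of the file, and the pages are later released with unpin_user_pages(). Names are illustrative:

#include <linux/mm.h>
#include <linux/sched.h>

static long demo_pin_range(unsigned long start, unsigned long nr_pages,
                           struct page **pages)
{
        long pinned;

        mmap_read_lock(current->mm);
        pinned = pin_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
        mmap_read_unlock(current->mm);

        return pinned;  /* release later with unpin_user_pages(pages, pinned) */
}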
3290 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
3296 if (WARN_ON_ONCE(!pages)) in pin_user_pages_unlocked()
3300 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); in pin_user_pages_unlocked()