Lines matching full:pages (identifier cross-reference; the hits below are all from the kernel's mm/gup.c)

33 * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
34 * @pages: array of pages to be maybe marked dirty, and definitely released.
35 * @npages: number of pages in the @pages array.
36 * @make_dirty: whether to mark the pages dirty
41 * For each page in the @pages array, make that page (or its head page, if a
43 * listed as clean. In any case, releases all pages using put_user_page(),
54 void put_user_pages_dirty_lock(struct page **pages, unsigned long npages, in put_user_pages_dirty_lock() argument
60 * TODO: this can be optimized for huge pages: if a series of pages is in put_user_pages_dirty_lock()
66 put_user_pages(pages, npages); in put_user_pages_dirty_lock()
71 struct page *page = compound_head(pages[index]); in put_user_pages_dirty_lock()
100 * put_user_pages() - release an array of gup-pinned pages.
101 * @pages: array of pages to be marked dirty and released.
102 * @npages: number of pages in the @pages array.
104 * For each page in the @pages array, release the page using put_user_page().
108 void put_user_pages(struct page **pages, unsigned long npages) in put_user_pages() argument
113 * TODO: this can be optimized for huge pages: if a series of pages is in put_user_pages()
118 put_user_page(pages[index]); in put_user_pages()
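The two put_user_pages*() helpers above are the release side of every gup pin taken later in this file. A minimal caller-side sketch with a hypothetical wrapper name; as documented above, passing make_dirty=false makes put_user_pages_dirty_lock() behave like plain put_user_pages():

#include <linux/mm.h>

/*
 * Release a previously gup-pinned array: dirty the pages first when the
 * caller may have written through the pin (e.g. FOLL_WRITE DMA), plain
 * put otherwise.  Hypothetical wrapper, not part of gup.c.
 */
static void release_pinned(struct page **pages, unsigned long npages,
                           bool wrote)
{
        if (wrote)
                put_user_pages_dirty_lock(pages, npages, true);
        else
                put_user_pages(pages, npages);
}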
128 * has touched so far, we don't want to allocate unnecessary pages or in no_page_table()
216 * Only return device mapping pages in the FOLL_GET case since in follow_page_pte()
226 /* Avoid special (like zero) pages in core dumps */ in follow_page_pte()
287 lru_add_drain(); /* push cached pages to LRU */ in follow_page_pte()
499 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
571 /* user gate pages are read-only */ in get_gate_page()
624 /* mlock all present pages, but do not fault in new pages */ in faultin_page()
698 * Anon pages in shared mappings are surprising: now in check_vma_flags()
724 * __get_user_pages() - pin user pages in memory
728 * @nr_pages: number of pages from start to pin
730 * @pages: array that receives pointers to the pages pinned.
732 * only intends to ensure the pages are faulted in.
737 * Returns number of pages pinned. This may be fewer than the number
738 * requested. If nr_pages is 0 or negative, returns 0. If no pages
765 * or mmap_sem contention, and if waiting is needed to pin all pages,
781 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
793 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); in __get_user_pages()
814 pages ? &pages[i] : NULL); in __get_user_pages()
826 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
834 * If we have a pending SIGKILL, don't keep faulting pages and in __get_user_pages()
871 if (pages) { in __get_user_pages()
872 pages[i] = page; in __get_user_pages()
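__get_user_pages() and its public wrappers may return fewer pages than requested, as the comments above note. A sketch of all-or-nothing handling on top of get_user_pages(), which must be called with mmap_sem held for read; the helper name and the FOLL_WRITE choice are illustrative:

#include <linux/mm.h>
#include <linux/sched.h>

/* Pin exactly @nr_pages writable pages or fail, handling short returns. */
static long pin_all_or_fail(unsigned long start, unsigned long nr_pages,
                            struct page **pages)
{
        unsigned long pinned = 0;
        long ret;

        down_read(&current->mm->mmap_sem);
        while (pinned < nr_pages) {
                ret = get_user_pages(start + pinned * PAGE_SIZE,
                                     nr_pages - pinned, FOLL_WRITE,
                                     pages + pinned, NULL);
                if (ret <= 0) {
                        up_read(&current->mm->mmap_sem);
                        put_user_pages(pages, pinned); /* undo partial pin */
                        return ret ? ret : -EFAULT;
                }
                pinned += ret;
        }
        up_read(&current->mm->mmap_sem);
        return pinned;
}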
1002 struct page **pages, in __get_user_pages_locked() argument
1017 if (pages) in __get_user_pages_locked()
1023 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1052 * For the prefault case (!pages) we only update counts. in __get_user_pages_locked()
1054 if (likely(pages)) in __get_user_pages_locked()
1055 pages += ret; in __get_user_pages_locked()
1067 pages, NULL, NULL); in __get_user_pages_locked()
1078 if (likely(pages)) in __get_user_pages_locked()
1079 pages++; in __get_user_pages_locked()
1094 * get_user_pages_remote() - pin user pages in memory
1099 * @nr_pages: number of pages from start to pin
1101 * @pages: array that receives pointers to the pages pinned.
1103 * only intends to ensure the pages are faulted in.
1110 * Returns number of pages pinned. This may be fewer than the number
1111 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1138 * addresses. The pages may be submitted for DMA to devices or accessed via
1151 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1163 return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, in get_user_pages_remote()
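get_user_pages_remote() is the only entry point here that takes an explicit tsk/mm pair, so it can pin pages in another process's address space. A sketch pinning a single page, assuming the caller already holds a reference on @mm; the helper is hypothetical:

#include <linux/mm.h>
#include <linux/sched.h>

/* Pin one writable page from another task's address space. */
static struct page *grab_remote_page(struct task_struct *tsk,
                                     struct mm_struct *mm, unsigned long addr)
{
        struct page *page = NULL;
        long ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages_remote(tsk, mm, addr, 1, FOLL_WRITE,
                                    &page, NULL, NULL);
        up_read(&mm->mmap_sem);

        return ret == 1 ? page : NULL;
}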
1170 * populate_vma_page_range() - populate a range of pages in the vma.
1176 * This takes care of mlocking the pages too if VM_LOCKED is set.
1228 * __mm_populate - populate and/or mlock pages within a range of address space.
1246 * We want to fault in pages for [nstart; end) address range. in __mm_populate()
1267 * Now fault in a range of pages. populate_vma_page_range() in __mm_populate()
1268 * double checks the vma flags, so that it won't mlock pages in __mm_populate()
1318 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1344 if (pages) { in __get_user_pages_locked()
1345 pages[i] = virt_to_page(start); in __get_user_pages_locked()
1346 if (pages[i]) in __get_user_pages_locked()
1347 get_page(pages[i]); in __get_user_pages_locked()
1405 * We don't want to dequeue from the pool because pool pages will in new_non_cma_page()
1437 struct page **pages, in check_and_migrate_cma_pages() argument
1450 struct page *head = compound_head(pages[i]); in check_and_migrate_cma_pages()
1456 step = compound_nr(head) - (pages[i] - head); in check_and_migrate_cma_pages()
1489 put_page(pages[i]); in check_and_migrate_cma_pages()
1494 * some of the pages failed migration. Do get_user_pages in check_and_migrate_cma_pages()
1503 * We did migrate all the pages, Try to get the page references in check_and_migrate_cma_pages()
1504 * again migrating any new CMA pages which we failed to isolate in check_and_migrate_cma_pages()
1508 pages, vmas, NULL, in check_and_migrate_cma_pages()
1524 struct page **pages, in check_and_migrate_cma_pages() argument
1540 struct page **pages, in __gup_longterm_locked() argument
1549 if (!pages) in __gup_longterm_locked()
1562 rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, in __gup_longterm_locked()
1572 put_page(pages[i]); in __gup_longterm_locked()
1577 rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages, in __gup_longterm_locked()
1591 struct page **pages, in __gup_longterm_locked() argument
1595 return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, in __gup_longterm_locked()
1608 unsigned int gup_flags, struct page **pages, in get_user_pages() argument
1612 pages, vmas, gup_flags | FOLL_TOUCH); in get_user_pages()
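get_user_pages() above routes through __gup_longterm_locked(), so a caller that adds FOLL_LONGTERM gets the CMA handling shown earlier: check_and_migrate_cma_pages() moves CMA pages out of the way before a long-lived pin is taken. A sketch of such a caller, with illustrative names:

#include <linux/mm.h>
#include <linux/sched.h>

/* Long-lived pin, e.g. for DMA that outlives the current syscall. */
static long pin_longterm(unsigned long start, unsigned long nr_pages,
                         struct page **pages)
{
        long ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(start, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
                             pages, NULL);
        up_read(&current->mm->mmap_sem);
        return ret;
}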
1625 * get_user_pages(tsk, mm, ..., pages, NULL);
1633 * get_user_pages_locked(tsk, mm, ..., pages, &locked);
1638 unsigned int gup_flags, struct page **pages, in get_user_pages_locked() argument
1651 pages, NULL, locked, in get_user_pages_locked()
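The conversion described in the comment above, written out against the current prototype (which, unlike the older comment text, no longer takes tsk/mm arguments); the wrapper name is made up:

#include <linux/mm.h>
#include <linux/sched.h>

/* get_user_pages_locked() may drop mmap_sem; @locked reports the state. */
static long pin_with_locked(unsigned long start, unsigned long nr_pages,
                            struct page **pages)
{
        int locked = 1;
        long ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages_locked(start, nr_pages, FOLL_WRITE,
                                    pages, &locked);
        if (locked)
                up_read(&current->mm->mmap_sem);
        return ret;
}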
1660 * get_user_pages(tsk, mm, ..., pages, NULL);
1665 * get_user_pages_unlocked(tsk, mm, ..., pages);
1672 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
1688 ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, in get_user_pages_unlocked()
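The unlocked variant takes and drops mmap_sem itself; note its argument order (@pages before @gup_flags) differs from the other entry points. A one-call wrapper sketch:

#include <linux/mm.h>

static long pin_simple(unsigned long start, unsigned long nr_pages,
                       struct page **pages)
{
        /* No caller-side locking: the helper handles mmap_sem internally. */
        return get_user_pages_unlocked(start, nr_pages, pages, FOLL_WRITE);
}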
1699 * get_user_pages_fast attempts to pin user pages by walking the page
1701 * protected from page table pages being freed from under it, and should
1706 * pages are freed. This is unsuitable for architectures that do not need
1709 * Another way to achieve this is to batch up page table containing pages
1711 * pages. Disabling interrupts will allow the fast_gup walker to both block
1719 * free pages containing page tables or TLB flushing requires IPI broadcast.
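The comment block above explains why the lockless walk is safe: with interrupts disabled, page-table pages cannot be freed under the walker on architectures where freeing them (or flushing TLBs) requires an IPI broadcast. A caller-side sketch of the resulting fast-then-slow pattern, mirroring the fallback that get_user_pages_fast() itself performs further down; names are illustrative:

#include <linux/mm.h>

/* Try the IRQ-disabled fast walk first, then the sleeping slow path. */
static long pin_fast_then_slow(unsigned long start, int nr_pages,
                               struct page **pages)
{
        long ret;
        int nr;

        /* Never sleeps, never faults; may pin fewer pages than asked. */
        nr = __get_user_pages_fast(start, nr_pages, 1, pages);
        if (nr == nr_pages)
                return nr;

        /* Fault in the remainder via the mmap_sem-taking path. */
        ret = get_user_pages_unlocked(start + nr * PAGE_SIZE, nr_pages - nr,
                                      pages + nr, FOLL_WRITE);
        if (ret < 0)
                return nr ? nr : ret;
        return nr + ret;
}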
1786 struct page **pages) in undo_dev_pagemap() argument
1789 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
1812 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
1839 undo_dev_pagemap(nr, nr_start, pages); in gup_pte_range()
1860 pages[*nr] = page; in gup_pte_range()
1881 * __get_user_pages_fast implementation that can pin pages. Thus it's still
1885 unsigned int flags, struct page **pages, int *nr) in gup_pte_range() argument
1893 unsigned long end, struct page **pages, int *nr) in __gup_device_huge() argument
1903 undo_dev_pagemap(nr, nr_start, pages); in __gup_device_huge()
1907 pages[*nr] = page; in __gup_device_huge()
1919 unsigned long end, struct page **pages, int *nr) in __gup_device_huge_pmd() argument
1925 if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) in __gup_device_huge_pmd()
1929 undo_dev_pagemap(nr, nr_start, pages); in __gup_device_huge_pmd()
1936 unsigned long end, struct page **pages, int *nr) in __gup_device_huge_pud() argument
1942 if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) in __gup_device_huge_pud()
1946 undo_dev_pagemap(nr, nr_start, pages); in __gup_device_huge_pud()
1953 unsigned long end, struct page **pages, int *nr) in __gup_device_huge_pmd() argument
1960 unsigned long end, struct page **pages, int *nr) in __gup_device_huge_pud() argument
1977 struct page **pages, int *nr) in gup_hugepte() argument
2002 pages[*nr] = page; in gup_hugepte()
2028 struct page **pages, int *nr) in gup_huge_pd() argument
2037 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2046 struct page **pages, int *nr) in gup_huge_pd() argument
2054 struct page **pages, int *nr) in gup_huge_pmd() argument
2065 return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); in gup_huge_pmd()
2071 pages[*nr] = page; in gup_huge_pmd()
2095 unsigned long end, unsigned int flags, struct page **pages, int *nr) in gup_huge_pud() argument
2106 return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); in gup_huge_pud()
2112 pages[*nr] = page; in gup_huge_pud()
2137 struct page **pages, int *nr) in gup_huge_pgd() argument
2149 pages[*nr] = page; in gup_huge_pgd()
2173 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
2197 pages, nr)) in gup_pmd_range()
2206 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
2208 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) in gup_pmd_range()
2216 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
2230 pages, nr)) in gup_pud_range()
2234 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
2236 } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr)) in gup_pud_range()
2244 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
2259 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
2261 } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
2269 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2283 pages, nr)) in gup_pgd_range()
2287 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
2289 } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
2295 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2315 * number of pages pinned, 0 if no pages were pinned.
2318 * pages pinned.
2321 struct page **pages) in __get_user_pages_fast() argument
2340 * With interrupts disabled, we block page table pages from being in __get_user_pages_fast()
2351 gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr); in __get_user_pages_fast()
2360 unsigned int gup_flags, struct page **pages) in __gup_longterm_unlocked() argument
2372 pages, NULL, gup_flags); in __gup_longterm_unlocked()
2376 pages, gup_flags); in __gup_longterm_unlocked()
2383 * get_user_pages_fast() - pin user pages in memory
2385 * @nr_pages: number of pages from start to pin
2387 * @pages: array that receives pointers to the pages pinned.
2390 * Attempt to pin user pages in memory without taking mm->mmap_sem.
2394 * Returns number of pages pinned. This may be fewer than the number
2395 * requested. If nr_pages is 0 or negative, returns 0. If no pages
2399 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
2420 gup_pgd_range(addr, end, gup_flags, pages, &nr); in get_user_pages_fast()
2426 /* Try to get the remaining pages with get_user_pages */ in get_user_pages_fast()
2428 pages += nr; in get_user_pages_fast()
2431 gup_flags, pages); in get_user_pages_fast()
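Putting the pieces together: an end-to-end sketch that pins a user buffer with get_user_pages_fast(), hands it to a caller-supplied function, and releases it with put_user_pages_dirty_lock(). The buffer arguments and helper name are hypothetical.

#include <linux/mm.h>
#include <linux/slab.h>

static int with_pinned_user_buf(unsigned long ubuf, unsigned long len,
                                int (*use)(struct page **, int))
{
        int nr_pages = DIV_ROUND_UP(len + offset_in_page(ubuf), PAGE_SIZE);
        struct page **pages;
        int pinned, err;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = get_user_pages_fast(ubuf, nr_pages, FOLL_WRITE, pages);
        if (pinned < 0) {
                err = pinned;
                goto out_free;
        }
        if (pinned != nr_pages) {
                /* Partial pin: release what we got and bail out. */
                put_user_pages(pages, pinned);
                err = -EFAULT;
                goto out_free;
        }

        err = use(pages, pinned);

        /* Pinned with FOLL_WRITE, so dirty the pages on release. */
        put_user_pages_dirty_lock(pages, pinned, true);
out_free:
        kfree(pages);
        return err;
}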