Lines matching refs: vma — each entry gives the source line number, the matching code, and the enclosing function, with a trailing note on whether vma is an argument, a local, or a struct member there.
364 struct vm_area_struct *vma, unsigned long floor, in free_pgtables() argument
368 unsigned long addr = vma->vm_start; in free_pgtables()
382 vma_start_write(vma); in free_pgtables()
383 unlink_anon_vmas(vma); in free_pgtables()
384 unlink_file_vma(vma); in free_pgtables()
386 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
387 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
393 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
395 vma = next; in free_pgtables()
398 vma_start_write(vma); in free_pgtables()
399 unlink_anon_vmas(vma); in free_pgtables()
400 unlink_file_vma(vma); in free_pgtables()
402 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
405 vma = next; in free_pgtables()
406 } while (vma); in free_pgtables()
488 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
491 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
520 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
521 index = linear_page_index(vma, addr); in print_bad_pte()
529 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
531 vma->vm_file, in print_bad_pte()
532 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
533 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
581 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
589 if (vma->vm_ops && vma->vm_ops->find_special_page) in vm_normal_page()
590 return vma->vm_ops->find_special_page(vma, addr); in vm_normal_page()
591 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
606 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
612 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
613 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
619 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
620 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
622 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
632 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
644 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, in vm_normal_folio() argument
647 struct page *page = vm_normal_page(vma, addr, pte); in vm_normal_folio()
655 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
665 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
666 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
672 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
673 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
675 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
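For context, a minimal hedged sketch (not taken from the listing above) of the usual calling pattern for vm_normal_page(): look up the page backing one address while holding the PTE lock. walk_one_pte() is a made-up name; the helpers it calls are standard kernel APIs.

static void walk_one_pte(struct vm_area_struct *vma, pmd_t *pmd,
			 unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	if (!pte)
		return;				/* the PTE table went away under us */
	if (pte_present(ptep_get(pte))) {
		/* NULL means a "special" mapping, e.g. a raw PFN under VM_PFNMAP */
		struct page *page = vm_normal_page(vma, addr, ptep_get(pte));

		if (page) {
			/* safe to treat as an ordinary struct page / folio here */
		}
	}
	pte_unmap_unlock(pte, ptl);
}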
696 static void restore_exclusive_pte(struct vm_area_struct *vma, in restore_exclusive_pte() argument
705 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in restore_exclusive_pte()
713 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in restore_exclusive_pte()
722 page_add_anon_rmap(page, vma, address, RMAP_NONE); in restore_exclusive_pte()
730 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
736 update_mmu_cache(vma, address, ptep); in restore_exclusive_pte()
744 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, in try_restore_exclusive_pte() argument
751 restore_exclusive_pte(vma, page, addr, src_pte); in try_restore_exclusive_pte()
983 struct vm_area_struct *vma, unsigned long addr) in page_copy_prealloc() argument
987 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); in page_copy_prealloc()
1380 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, in zap_install_uffd_wp_if_needed() argument
1385 if (vma_is_anonymous(vma)) in zap_install_uffd_wp_if_needed()
1391 pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); in zap_install_uffd_wp_if_needed()
1395 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1428 page = vm_normal_page(vma, addr, ptent); in zap_pte_range()
1433 arch_check_zapped_pte(vma, ptent); in zap_pte_range()
1435 zap_install_uffd_wp_if_needed(vma, addr, pte, details, in zap_pte_range()
1451 if (pte_young(ptent) && likely(vma_has_recency(vma))) in zap_pte_range()
1456 page_remove_rmap(page, vma, false); in zap_pte_range()
1458 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1480 WARN_ON_ONCE(!vma_is_anonymous(vma)); in zap_pte_range()
1483 page_remove_rmap(page, vma, false); in zap_pte_range()
1491 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1502 if (!vma_is_anonymous(vma) && in zap_pte_range()
1514 zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); in zap_pte_range()
1523 tlb_flush_rmaps(tlb, vma); in zap_pte_range()
1540 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1552 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1553 else if (zap_huge_pmd(tlb, vma, pmd, addr)) { in zap_pmd_range()
1573 addr = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1582 struct vm_area_struct *vma, p4d_t *p4d, in zap_pud_range() argument
1595 split_huge_pud(vma, pud, addr); in zap_pud_range()
1596 } else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
1602 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1611 struct vm_area_struct *vma, pgd_t *pgd, in zap_p4d_range() argument
1623 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
1630 struct vm_area_struct *vma, in unmap_page_range() argument
1638 tlb_start_vma(tlb, vma); in unmap_page_range()
1639 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1644 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1646 tlb_end_vma(tlb, vma); in unmap_page_range()
1651 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1655 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1658 if (start >= vma->vm_end) in unmap_single_vma()
1660 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1661 if (end <= vma->vm_start) in unmap_single_vma()
1664 if (vma->vm_file) in unmap_single_vma()
1665 uprobe_munmap(vma, start, end); in unmap_single_vma()
1667 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1668 untrack_pfn(vma, 0, 0, mm_wr_locked); in unmap_single_vma()
1671 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1683 if (vma->vm_file) { in unmap_single_vma()
1686 __unmap_hugepage_range(tlb, vma, start, end, in unmap_single_vma()
1690 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1716 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1727 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in unmap_vmas()
1733 hugetlb_zap_begin(vma, &start, &end); in unmap_vmas()
1734 unmap_single_vma(tlb, vma, start, end, &details, in unmap_vmas()
1736 hugetlb_zap_end(vma, &details); in unmap_vmas()
1737 } while ((vma = mas_find(mas, tree_end - 1)) != NULL); in unmap_vmas()
1750 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1758 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in zap_page_range_single()
1760 hugetlb_zap_begin(vma, &range.start, &range.end); in zap_page_range_single()
1761 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
1762 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
1768 unmap_single_vma(&tlb, vma, address, end, details, false); in zap_page_range_single()
1771 hugetlb_zap_end(vma, details); in zap_page_range_single()
1785 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1788 if (!range_in_vma(vma, address, address + size) || in zap_vma_ptes()
1789 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1792 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
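For context, a minimal hedged sketch of the intended use of zap_vma_ptes(): a driver tearing down the PTEs it installed in its own VM_PFNMAP mapping, for example before the backing resource disappears. my_revoke_mapping() is a made-up name.

static void my_revoke_mapping(struct vm_area_struct *vma)
{
	/* silently does nothing unless the range is inside the VMA and VM_PFNMAP is set */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}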
1836 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_into_pte_locked() argument
1843 inc_mm_counter(vma->vm_mm, mm_counter_file(page)); in insert_page_into_pte_locked()
1844 page_add_file_rmap(page, vma, false); in insert_page_into_pte_locked()
1845 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); in insert_page_into_pte_locked()
1856 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1867 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
1870 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); in insert_page()
1876 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_in_batch_locked() argument
1886 return insert_page_into_pte_locked(vma, pte, addr, page, prot); in insert_page_in_batch_locked()
1892 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, in insert_pages() argument
1898 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
1927 int err = insert_page_in_batch_locked(vma, pte, in insert_pages()
1965 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
1970 if (addr < vma->vm_start || end_addr >= vma->vm_end) in vm_insert_pages()
1972 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_pages()
1973 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
1974 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_pages()
1975 vm_flags_set(vma, VM_MIXEDMAP); in vm_insert_pages()
1978 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); in vm_insert_pages()
2011 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
2014 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
2018 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
2019 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2020 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
2021 vm_flags_set(vma, VM_MIXEDMAP); in vm_insert_page()
2023 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
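For context, a hedged sketch of the typical vm_insert_page() caller: an f_op->mmap() handler mapping one kernel-allocated page into the new VMA. my_mmap() and my_page are made-up names; the driver keeps its own reference on the page.

static struct page *my_page;	/* allocated elsewhere, e.g. alloc_page(GFP_KERNEL) */

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	/* the first call flips the VMA to VM_MIXEDMAP, as the checks above show */
	return vm_insert_page(vma, vma->vm_start, my_page);
}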
2038 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, in __vm_map_pages() argument
2041 unsigned long count = vma_pages(vma); in __vm_map_pages()
2042 unsigned long uaddr = vma->vm_start; in __vm_map_pages()
2054 ret = vm_insert_page(vma, uaddr, pages[offset + i]); in __vm_map_pages()
2081 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, in vm_map_pages() argument
2084 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); in vm_map_pages()
2101 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, in vm_map_pages_zero() argument
2104 return __vm_map_pages(vma, pages, num, 0); in vm_map_pages_zero()
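Similarly, a hedged sketch of vm_map_pages(): mapping a whole driver-owned page array from f_op->mmap(), with the user-supplied vma->vm_pgoff taken as the starting index into the array. my_pages, my_npages and my_mmap_buf are made-up names.

static struct page **my_pages;		/* filled in by the driver */
static unsigned long my_npages;

static int my_mmap_buf(struct file *filp, struct vm_area_struct *vma)
{
	/* fails with -ENXIO/-EINVAL if the requested window does not fit the array */
	return vm_map_pages(vma, my_pages, my_npages);
}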
2108 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
2111 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2136 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2137 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) in insert_pfn()
2138 update_mmu_cache(vma, addr, pte); in insert_pfn()
2151 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2155 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
2195 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
2204 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_prot()
2205 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_prot()
2207 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_prot()
2208 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_prot()
2210 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
2216 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); in vmf_insert_pfn_prot()
2218 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, in vmf_insert_pfn_prot()
2243 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn() argument
2246 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
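For context, a hedged sketch of vmf_insert_pfn() used from a vm_ops->fault handler of a VM_PFNMAP mapping, returning its vm_fault_t directly. my_fault() and my_base_pfn are made-up names.

static unsigned long my_base_pfn;	/* made up: discovered at probe time */

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	unsigned long off = (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;

	/* installs the PTE and reports the result to the fault core */
	return vmf_insert_pfn(vmf->vma, vmf->address, my_base_pfn + off);
}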
2250 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) in vm_mixed_ok() argument
2253 if (vma->vm_flags & VM_MIXEDMAP) in vm_mixed_ok()
2264 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, in __vm_insert_mixed() argument
2267 pgprot_t pgprot = vma->vm_page_prot; in __vm_insert_mixed()
2270 BUG_ON(!vm_mixed_ok(vma, pfn)); in __vm_insert_mixed()
2272 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
2275 track_pfn_insert(vma, &pgprot, pfn); in __vm_insert_mixed()
2297 err = insert_page(vma, addr, page, pgprot); in __vm_insert_mixed()
2299 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
2310 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed() argument
2313 return __vm_insert_mixed(vma, addr, pfn, false); in vmf_insert_mixed()
2322 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, in vmf_insert_mixed_mkwrite() argument
2325 return __vm_insert_mixed(vma, addr, pfn, true); in vmf_insert_mixed_mkwrite()
2431 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_notrack() argument
2437 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_notrack()
2461 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range_notrack()
2462 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range_notrack()
2464 vma->vm_pgoff = pfn; in remap_pfn_range_notrack()
2467 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); in remap_pfn_range_notrack()
2472 flush_cache_range(vma, addr, end); in remap_pfn_range_notrack()
2496 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
2501 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
2505 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); in remap_pfn_range()
2507 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); in remap_pfn_range()
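For context, the classic whole-range remap_pfn_range() call from an f_op->mmap() handler, sketched here with made-up names (my_mmap_io, MY_PHYS_BASE). vm_iomap_memory(), listed next, wraps the same operation when the caller has a physical start and length rather than a PFN.

#define MY_PHYS_BASE	0xfe000000UL	/* made up: device register base */

static int my_mmap_io(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       MY_PHYS_BASE >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}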
2527 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
2546 if (vma->vm_pgoff > pages) in vm_iomap_memory()
2548 pfn += vma->vm_pgoff; in vm_iomap_memory()
2549 pages -= vma->vm_pgoff; in vm_iomap_memory()
2552 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2557 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
2811 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user() local
2812 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
2816 if (copy_mc_user_highpage(dst, src, addr, vma)) { in __wp_page_copy_user()
2847 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
2853 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
2854 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); in __wp_page_copy_user()
2872 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
2903 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) in __get_fault_gfp_mask() argument
2905 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask()
2930 if (vmf->vma->vm_file && in do_page_mkwrite()
2931 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
2934 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2958 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page() local
2962 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
2976 file_update_time(vma->vm_file); in fault_dirty_shared_page()
3012 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
3027 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3029 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_reuse()
3030 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3031 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_reuse()
3056 struct vm_area_struct *vma = vmf->vma; in wp_page_copy() local
3057 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3069 if (unlikely(anon_vma_prepare(vma))) in wp_page_copy()
3073 new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); in wp_page_copy()
3077 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, in wp_page_copy()
3126 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3127 entry = mk_pte(&new_folio->page, vma->vm_page_prot); in wp_page_copy()
3135 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_copy()
3145 ptep_clear_flush(vma, vmf->address, vmf->pte); in wp_page_copy()
3146 folio_add_new_anon_rmap(new_folio, vma, vmf->address); in wp_page_copy()
3147 folio_add_lru_vma(new_folio, vma); in wp_page_copy()
3155 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_copy()
3179 page_remove_rmap(vmf->page, vma, false); in wp_page_copy()
3187 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3231 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3232 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3241 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3255 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared() local
3257 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
3262 vma_end_read(vmf->vma); in wp_pfn_shared()
3267 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3279 struct vm_area_struct *vma = vmf->vma; in wp_page_shared() local
3284 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
3290 vma_end_read(vmf->vma); in wp_page_shared()
3342 struct vm_area_struct *vma = vmf->vma; in do_wp_page() local
3346 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { in do_wp_page()
3355 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3356 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3357 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3360 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3369 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in do_wp_page()
3424 page_move_anon_rmap(vmf->page, vma); in do_wp_page()
3435 if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) { in do_wp_page()
3437 vma_end_read(vmf->vma); in do_wp_page()
3455 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
3459 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
3467 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
3470 vma_interval_tree_foreach(vma, root, first_index, last_index) { in unmap_mapping_range_tree()
3471 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
3472 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
3476 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
3477 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3478 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3589 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry() local
3610 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3614 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3617 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); in remove_device_exclusive_entry()
3629 struct vm_area_struct *vma, in should_try_to_free_swap() argument
3634 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || in should_try_to_free_swap()
3649 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
3662 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
3669 if (vma_is_anonymous(vmf->vma)) in do_pte_missing()
3685 if (unlikely(!userfaultfd_wp(vmf->vma))) in pte_marker_handle_uffd_wp()
3724 struct vm_area_struct *vma = vmf->vma; in do_swap_page() local
3741 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3752 vma_end_read(vma); in do_swap_page()
3758 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3778 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
3789 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
3799 vma, vmf->address, false); in do_swap_page()
3806 vma->vm_mm, GFP_KERNEL, in do_swap_page()
3837 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3848 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
3879 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3905 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3975 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
3978 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
3979 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
3980 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
3991 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in do_swap_page()
3996 flush_icache_page(vma, page); in do_swap_page()
4005 page_add_new_anon_rmap(page, vma, vmf->address); in do_swap_page()
4006 folio_add_lru_vma(folio, vma); in do_swap_page()
4008 page_add_anon_rmap(page, vma, vmf->address, rmap_flags); in do_swap_page()
4013 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
4014 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
4038 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in do_swap_page()
4070 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page() local
4076 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
4083 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4088 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
4090 vma->vm_page_prot)); in do_anonymous_page()
4091 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4096 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4099 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4103 if (userfaultfd_missing(vma)) { in do_anonymous_page()
4111 if (unlikely(anon_vma_prepare(vma))) in do_anonymous_page()
4113 folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); in do_anonymous_page()
4117 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
4128 entry = mk_pte(&folio->page, vma->vm_page_prot); in do_anonymous_page()
4130 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
4131 entry = pte_mkwrite(pte_mkdirty(entry), vma); in do_anonymous_page()
4133 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
4138 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4142 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4147 if (userfaultfd_missing(vma)) { in do_anonymous_page()
4153 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in do_anonymous_page()
4154 folio_add_new_anon_rmap(folio, vma, vmf->address); in do_anonymous_page()
4155 folio_add_lru_vma(folio, vma); in do_anonymous_page()
4159 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
4162 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in do_anonymous_page()
4183 struct vm_area_struct *vma = vmf->vma; in __do_fault() local
4202 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4207 ret = vma->vm_ops->fault(vmf); in __do_fault()
4240 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte() local
4242 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4247 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4253 struct vm_area_struct *vma = vmf->vma; in do_set_pmd() local
4259 if (!transhuge_vma_suitable(vma, haddr)) in do_set_pmd()
4280 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4285 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4289 flush_icache_pages(vma, page, HPAGE_PMD_NR); in do_set_pmd()
4291 entry = mk_huge_pmd(page, vma->vm_page_prot); in do_set_pmd()
4293 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_set_pmd()
4295 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
4296 page_add_file_rmap(page, vma, true); in do_set_pmd()
4304 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4306 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
4333 struct vm_area_struct *vma = vmf->vma; in set_pte_range() local
4339 flush_icache_pages(vma, page, nr); in set_pte_range()
4340 entry = mk_pte(page, vma->vm_page_prot); in set_pte_range()
4348 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in set_pte_range()
4352 if (write && !(vma->vm_flags & VM_SHARED)) { in set_pte_range()
4353 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr); in set_pte_range()
4355 folio_add_new_anon_rmap(folio, vma, addr); in set_pte_range()
4356 folio_add_lru_vma(folio, vma); in set_pte_range()
4358 add_mm_counter(vma->vm_mm, mm_counter_file(page), nr); in set_pte_range()
4359 folio_add_file_rmap_range(folio, page, nr, vma, false); in set_pte_range()
4361 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
4364 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); in set_pte_range()
4392 struct vm_area_struct *vma = vmf->vma; in finish_fault() local
4397 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) in finish_fault()
4406 if (!(vma->vm_flags & VM_SHARED)) { in finish_fault()
4407 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
4420 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
4421 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
4425 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
4437 update_mmu_tlb(vma, vmf->address, vmf->pte); in finish_fault()
4509 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in do_fault_around()
4519 pte_off + vma_pages(vmf->vma) - vma_off) - 1; in do_fault_around()
4522 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4528 ret = vmf->vma->vm_ops->map_pages(vmf, in do_fault_around()
4540 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
4543 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
4567 vma_end_read(vmf->vma); in do_read_fault()
4585 struct vm_area_struct *vma = vmf->vma; in do_cow_fault() local
4589 vma_end_read(vma); in do_cow_fault()
4593 if (unlikely(anon_vma_prepare(vma))) in do_cow_fault()
4596 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
4600 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, in do_cow_fault()
4613 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
4629 struct vm_area_struct *vma = vmf->vma; in do_shared_fault() local
4634 vma_end_read(vma); in do_shared_fault()
4648 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
4680 struct vm_area_struct *vma = vmf->vma; in do_fault() local
4681 struct mm_struct *vm_mm = vma->vm_mm; in do_fault()
4687 if (!vma->vm_ops->fault) { in do_fault()
4688 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
4709 else if (!(vma->vm_flags & VM_SHARED)) in do_fault()
4722 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, in numa_migrate_prep() argument
4728 vma_set_access_pid_bit(vma); in numa_migrate_prep()
4736 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
4741 struct vm_area_struct *vma = vmf->vma; in do_numa_page() local
4763 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
4770 if (!writable && vma_wants_manual_pte_write_upgrade(vma) && in do_numa_page()
4771 can_change_pte_writable(vma, vmf->address, pte)) in do_numa_page()
4774 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
4797 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) in do_numa_page()
4810 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
4820 if (migrate_misplaced_page(page, vma, target_nid)) { in do_numa_page()
4825 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
4845 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
4846 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
4849 pte = pte_mkwrite(pte, vma); in do_numa_page()
4850 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
4851 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in do_numa_page()
4858 struct vm_area_struct *vma = vmf->vma; in create_huge_pmd() local
4859 if (vma_is_anonymous(vma)) in create_huge_pmd()
4861 if (vma->vm_ops->huge_fault) in create_huge_pmd()
4862 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
4869 struct vm_area_struct *vma = vmf->vma; in wp_huge_pmd() local
4873 if (vma_is_anonymous(vma)) { in wp_huge_pmd()
4875 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) in wp_huge_pmd()
4880 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pmd()
4881 if (vma->vm_ops->huge_fault) { in wp_huge_pmd()
4882 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
4889 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
4898 struct vm_area_struct *vma = vmf->vma; in create_huge_pud() local
4900 if (vma_is_anonymous(vma)) in create_huge_pud()
4902 if (vma->vm_ops->huge_fault) in create_huge_pud()
4903 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); in create_huge_pud()
4912 struct vm_area_struct *vma = vmf->vma; in wp_huge_pud() local
4916 if (vma_is_anonymous(vma)) in wp_huge_pud()
4918 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pud()
4919 if (vma->vm_ops->huge_fault) { in wp_huge_pud()
4920 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); in wp_huge_pud()
4927 __split_huge_pud(vma, vmf->pud, vmf->address); in wp_huge_pud()
4967 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
4986 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
4992 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
5002 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
5004 update_mmu_cache_range(vmf, vmf->vma, vmf->address, in handle_pte_fault()
5017 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, in handle_pte_fault()
5031 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, in __handle_mm_fault() argument
5035 .vma = vma, in __handle_mm_fault()
5039 .pgoff = linear_page_index(vma, address), in __handle_mm_fault()
5040 .gfp_mask = __get_fault_gfp_mask(vma), in __handle_mm_fault()
5042 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
5043 unsigned long vm_flags = vma->vm_flags; in __handle_mm_fault()
5058 hugepage_vma_check(vma, vm_flags, false, true, true)) { in __handle_mm_fault()
5092 hugepage_vma_check(vma, vm_flags, false, true, true)) { in __handle_mm_fault()
5107 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
5194 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
5197 current->in_lru_fault = vma_has_recency(vma); in lru_gen_enter_fault()
5205 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
5214 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, in sanitize_fault_flags() argument
5224 if (!is_cow_mapping(vma->vm_flags)) in sanitize_fault_flags()
5228 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) in sanitize_fault_flags()
5231 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && in sanitize_fault_flags()
5232 !is_cow_mapping(vma->vm_flags))) in sanitize_fault_flags()
5255 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
5259 struct mm_struct *mm = vma->vm_mm; in handle_mm_fault()
5264 ret = sanitize_fault_flags(vma, &flags); in handle_mm_fault()
5268 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, in handle_mm_fault()
5282 lru_gen_enter_fault(vma); in handle_mm_fault()
5284 if (unlikely(is_vm_hugetlb_page(vma))) in handle_mm_fault()
5285 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
5287 ret = __handle_mm_fault(vma, address, flags); in handle_mm_fault()
5372 struct vm_area_struct *vma; in lock_mm_and_find_vma() local
5377 vma = find_vma(mm, addr); in lock_mm_and_find_vma()
5378 if (likely(vma && (vma->vm_start <= addr))) in lock_mm_and_find_vma()
5379 return vma; in lock_mm_and_find_vma()
5385 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { in lock_mm_and_find_vma()
5403 vma = find_vma(mm, addr); in lock_mm_and_find_vma()
5404 if (!vma) in lock_mm_and_find_vma()
5406 if (vma->vm_start <= addr) in lock_mm_and_find_vma()
5408 if (!(vma->vm_flags & VM_GROWSDOWN)) in lock_mm_and_find_vma()
5412 if (expand_stack_locked(vma, addr)) in lock_mm_and_find_vma()
5417 return vma; in lock_mm_and_find_vma()
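For context, a heavily simplified, hedged sketch of the pattern most architecture fault handlers build around lock_mm_and_find_vma() and handle_mm_fault(); fault_sketch() is a made-up name and all signal/error reporting is omitted.

static void fault_sketch(struct mm_struct *mm, unsigned long address,
			 bool write, struct pt_regs *regs)
{
	unsigned int flags = FAULT_FLAG_DEFAULT | FAULT_FLAG_USER;
	struct vm_area_struct *vma;
	vm_fault_t fault;

	if (write)
		flags |= FAULT_FLAG_WRITE;

	/* returns with the mmap read lock held, or NULL for a bad address */
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		return;		/* a real handler raises SIGSEGV here */

	fault = handle_mm_fault(vma, address, flags, regs);

	/* on RETRY/COMPLETED the lock has already been dropped for us */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		mmap_read_unlock(mm);
}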
5435 struct vm_area_struct *vma; in lock_vma_under_rcu() local
5439 vma = mas_walk(&mas); in lock_vma_under_rcu()
5440 if (!vma) in lock_vma_under_rcu()
5443 if (!vma_start_read(vma)) in lock_vma_under_rcu()
5452 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) in lock_vma_under_rcu()
5456 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in lock_vma_under_rcu()
5460 if (vma->detached) { in lock_vma_under_rcu()
5461 vma_end_read(vma); in lock_vma_under_rcu()
5468 return vma; in lock_vma_under_rcu()
5471 vma_end_read(vma); in lock_vma_under_rcu()
5622 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
5629 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
5632 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
5642 int follow_phys(struct vm_area_struct *vma, in follow_phys() argument
5650 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_phys()
5653 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
5682 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
5693 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in generic_access_phys()
5697 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
5712 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
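For context, a hedged sketch of where generic_access_phys() is normally plugged in: as the vm_ops->access hook of an IO/PFN mapping, so ptrace and /proc/<pid>/mem can reach the mapping through follow_pte(). my_io_vm_ops is a made-up name.

#ifdef CONFIG_HAVE_IOREMAP_PROT
static const struct vm_operations_struct my_io_vm_ops = {
	.access = generic_access_phys,
};
#endif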
5759 struct vm_area_struct *vma = NULL; in __access_remote_vm() local
5761 gup_flags, &vma); in __access_remote_vm()
5765 vma = vma_lookup(mm, addr); in __access_remote_vm()
5766 if (!vma) { in __access_remote_vm()
5767 vma = expand_stack(mm, addr); in __access_remote_vm()
5770 if (!vma) in __access_remote_vm()
5784 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
5785 bytes = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
5798 copy_to_user_page(vma, page, addr, in __access_remote_vm()
5802 copy_from_user_page(vma, page, addr, in __access_remote_vm()
5864 struct vm_area_struct *vma; in print_vma_addr() local
5872 vma = find_vma(mm, ip); in print_vma_addr()
5873 if (vma && vma->vm_file) { in print_vma_addr()
5874 struct file *f = vma->vm_file; in print_vma_addr()
5883 vma->vm_start, in print_vma_addr()
5884 vma->vm_end - vma->vm_start); in print_vma_addr()
6005 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
6018 addr + i*PAGE_SIZE, vma)) { in copy_user_gigantic_page()
6029 struct vm_area_struct *vma; member
6037 addr, copy_arg->vma)) { in copy_subpage()
6045 unsigned long addr_hint, struct vm_area_struct *vma) in copy_user_large_folio() argument
6053 .vma = vma, in copy_user_large_folio()
6057 return copy_user_gigantic_page(dst, src, addr, vma, in copy_user_large_folio()