Lines Matching refs:vma

615 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,  in free_pgtables()  argument
618 while (vma) { in free_pgtables()
619 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
620 unsigned long addr = vma->vm_start; in free_pgtables()
626 unlink_anon_vmas(vma); in free_pgtables()
627 unlink_file_vma(vma); in free_pgtables()
629 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
630 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
636 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
638 vma = next; in free_pgtables()
639 next = vma->vm_next; in free_pgtables()
640 unlink_anon_vmas(vma); in free_pgtables()
641 unlink_file_vma(vma); in free_pgtables()
643 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
646 vma = next; in free_pgtables()
726 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
729 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
758 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
759 index = linear_page_index(vma, addr); in print_bad_pte()
767 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
769 vma->vm_file, in print_bad_pte()
770 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
771 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
819 struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in _vm_normal_page() argument
827 if (vma->vm_ops && vma->vm_ops->find_special_page) in _vm_normal_page()
828 return vma->vm_ops->find_special_page(vma, addr); in _vm_normal_page()
829 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in _vm_normal_page()
859 print_bad_pte(vma, addr, pte, NULL); in _vm_normal_page()
865 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in _vm_normal_page()
866 if (vma->vm_flags & VM_MIXEDMAP) { in _vm_normal_page()
872 off = (addr - vma->vm_start) >> PAGE_SHIFT; in _vm_normal_page()
873 if (pfn == vma->vm_pgoff + off) in _vm_normal_page()
875 if (!is_cow_mapping(vma->vm_flags)) in _vm_normal_page()
885 print_bad_pte(vma, addr, pte, NULL); in _vm_normal_page()
898 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
908 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
909 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
915 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
916 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
918 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
947 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, in copy_one_pte() argument
950 unsigned long vm_flags = vma->vm_flags; in copy_one_pte()
1038 page = vm_normal_page(vma, addr, pte); in copy_one_pte()
1064 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, in copy_pte_range() argument
1103 vma, addr, rss); in copy_pte_range()
1127 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, in copy_pmd_range() argument
1142 VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma); in copy_pmd_range()
1144 dst_pmd, src_pmd, addr, vma); in copy_pmd_range()
1154 vma, addr, next)) in copy_pmd_range()
1161 p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma, in copy_pud_range() argument
1176 VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma); in copy_pud_range()
1178 dst_pud, src_pud, addr, vma); in copy_pud_range()
1188 vma, addr, next)) in copy_pud_range()
1195 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, in copy_p4d_range() argument
1210 vma, addr, next)) in copy_p4d_range()
1217 struct vm_area_struct *vma) in copy_page_range() argument
1221 unsigned long addr = vma->vm_start; in copy_page_range()
1222 unsigned long end = vma->vm_end; in copy_page_range()
1234 if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && in copy_page_range()
1235 !vma->anon_vma) in copy_page_range()
1238 if (is_vm_hugetlb_page(vma)) in copy_page_range()
1239 return copy_hugetlb_page_range(dst_mm, src_mm, vma); in copy_page_range()
1241 if (unlikely(vma->vm_flags & VM_PFNMAP)) { in copy_page_range()
1246 ret = track_pfn_copy(vma); in copy_page_range()
1257 is_cow = is_cow_mapping(vma->vm_flags); in copy_page_range()
1272 vma, addr, next))) { in copy_page_range()
1284 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1311 page = _vm_normal_page(vma, addr, ptent, true); in zap_pte_range()
1334 likely(!(vma->vm_flags & VM_SEQ_READ))) in zap_pte_range()
1340 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1385 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1414 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1426 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1427 else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1440 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1449 struct vm_area_struct *vma, p4d_t *p4d, in zap_pud_range() argument
1461 VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma); in zap_pud_range()
1462 split_huge_pud(vma, pud, addr); in zap_pud_range()
1463 } else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
1469 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1478 struct vm_area_struct *vma, pgd_t *pgd, in zap_p4d_range() argument
1490 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
1497 struct vm_area_struct *vma, in unmap_page_range() argument
1505 tlb_start_vma(tlb, vma); in unmap_page_range()
1506 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1511 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1513 tlb_end_vma(tlb, vma); in unmap_page_range()
1518 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1522 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1525 if (start >= vma->vm_end) in unmap_single_vma()
1527 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1528 if (end <= vma->vm_start) in unmap_single_vma()
1531 if (vma->vm_file) in unmap_single_vma()
1532 uprobe_munmap(vma, start, end); in unmap_single_vma()
1534 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1535 untrack_pfn(vma, 0, 0); in unmap_single_vma()
1538 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1550 if (vma->vm_file) { in unmap_single_vma()
1551 i_mmap_lock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1552 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); in unmap_single_vma()
1553 i_mmap_unlock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1556 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1579 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1582 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1585 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) in unmap_vmas()
1586 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); in unmap_vmas()
1598 void zap_page_range(struct vm_area_struct *vma, unsigned long start, in zap_page_range() argument
1601 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1609 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) in zap_page_range()
1610 unmap_single_vma(&tlb, vma, start, end, NULL); in zap_page_range()
1624 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1627 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
1635 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1651 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1654 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1655 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1658 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
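
The zap_vma_ptes() entries above are the driver-facing helper for tearing down PTEs that a driver earlier installed into a VM_PFNMAP vma (the 1654-1655 check rejects anything else). A minimal sketch, assuming a hypothetical revocation path; my_dev_revoke_mapping is not a real kernel function:

static void my_dev_revoke_mapping(struct vm_area_struct *vma)
{
	/* only legal on VM_PFNMAP vmas, per the check shown above */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
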
1692 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1695 struct mm_struct *mm = vma->vm_mm; in insert_page()
1754 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
1757 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
1761 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
1762 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1763 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
1764 vma->vm_flags |= VM_MIXEDMAP; in vm_insert_page()
1766 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
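
vm_insert_page(), listed above, is the usual way for a driver to map its own kernel-allocated pages into a user vma from an mmap handler (mmap_sem is held for write there, which the trylock check at 1762 relies on). A minimal sketch, assuming a hypothetical my_dev structure holding nr_pages order-0 pages from alloc_page(); vm_pgoff handling is omitted:

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical driver state */
	unsigned long npages = vma_pages(vma);
	unsigned long i;
	int ret;

	if (npages > dev->nr_pages)			/* requested mapping too large */
		return -EINVAL;

	for (i = 0; i < npages; i++) {
		/* installs one page; sets VM_MIXEDMAP on the vma on first use */
		ret = vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
				     dev->pages[i]);
		if (ret)
			return ret;
	}
	return 0;
}
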
1770 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
1773 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
1809 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
1813 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
1839 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pfn() argument
1842 return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vm_insert_pfn()
1861 int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pfn_prot() argument
1871 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vm_insert_pfn_prot()
1872 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vm_insert_pfn_prot()
1874 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vm_insert_pfn_prot()
1875 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vm_insert_pfn_prot()
1877 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_pfn_prot()
1883 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); in vm_insert_pfn_prot()
1885 ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, in vm_insert_pfn_prot()
1892 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) in vm_mixed_ok() argument
1895 if (vma->vm_flags & VM_MIXEDMAP) in vm_mixed_ok()
1906 static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in __vm_insert_mixed() argument
1909 pgprot_t pgprot = vma->vm_page_prot; in __vm_insert_mixed()
1911 BUG_ON(!vm_mixed_ok(vma, pfn)); in __vm_insert_mixed()
1913 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
1916 track_pfn_insert(vma, &pgprot, pfn); in __vm_insert_mixed()
1938 return insert_page(vma, addr, page, pgprot); in __vm_insert_mixed()
1940 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
1943 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vm_insert_mixed() argument
1946 return __vm_insert_mixed(vma, addr, pfn, false); in vm_insert_mixed()
1957 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, in vmf_insert_mixed_mkwrite() argument
1962 err = __vm_insert_mixed(vma, addr, pfn, true); in vmf_insert_mixed_mkwrite()
2079 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
2085 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
2107 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range()
2108 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range()
2110 vma->vm_pgoff = pfn; in remap_pfn_range()
2113 err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
2117 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
2122 flush_cache_range(vma, addr, end); in remap_pfn_range()
2132 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size)); in remap_pfn_range()
2151 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
2170 if (vma->vm_pgoff > pages) in vm_iomap_memory()
2172 pfn += vma->vm_pgoff; in vm_iomap_memory()
2173 pages -= vma->vm_pgoff; in vm_iomap_memory()
2176 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2181 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
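
remap_pfn_range() and vm_iomap_memory() above cover the other common driver case: mapping a raw physical range (typically MMIO) rather than struct pages, with the vma marked VM_IO | VM_PFNMAP. A minimal sketch, assuming hypothetical mmio_phys/mmio_len fields on the device structure; vm_iomap_memory() validates vm_pgoff and the vma size itself, as the checks listed above show:

static int my_dev_iomem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical driver state */

	/* device registers must not be cached by the CPU */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* wraps io_remap_pfn_range() with bounds and vm_pgoff checks */
	return vm_iomap_memory(vma, dev->mmio_phys, dev->mmio_len);
}
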
2332 static void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) in cow_user_page() argument
2357 copy_user_highpage(dst, src, va, vma); in cow_user_page()
2360 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) in __get_fault_gfp_mask() argument
2362 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask()
2388 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2410 static void fault_dirty_shared_page(struct vm_area_struct *vma, in fault_dirty_shared_page() argument
2415 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
2437 file_update_time(vma->vm_file); in fault_dirty_shared_page()
2451 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
2462 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
2464 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_reuse()
2465 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
2466 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
2488 struct vm_area_struct *vma = vmf->vma; in wp_page_copy() local
2489 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
2498 if (unlikely(anon_vma_prepare(vma))) in wp_page_copy()
2502 new_page = alloc_zeroed_user_highpage_movable(vma, in wp_page_copy()
2507 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, in wp_page_copy()
2511 cow_user_page(new_page, old_page, vmf->address, vma); in wp_page_copy()
2535 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
2536 entry = mk_pte(new_page, vma->vm_page_prot); in wp_page_copy()
2537 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_copy()
2544 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
2545 page_add_new_anon_rmap(new_page, vma, vmf->address, false); in wp_page_copy()
2547 lru_cache_add_active_or_unevictable(new_page, vma); in wp_page_copy()
2554 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
2602 if (page_copied && (vma->vm_flags & VM_LOCKED)) { in wp_page_copy()
2636 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
2637 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
2657 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared() local
2659 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
2664 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
2676 struct vm_area_struct *vma = vmf->vma; in wp_page_shared() local
2680 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
2700 fault_dirty_shared_page(vma, vmf->page); in wp_page_shared()
2727 struct vm_area_struct *vma = vmf->vma; in do_wp_page() local
2729 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
2738 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
2756 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_wp_page()
2775 page_move_anon_rmap(vmf->page, vma); in do_wp_page()
2782 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
2796 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
2800 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
2806 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
2809 vma_interval_tree_foreach(vma, root, in unmap_mapping_range_tree()
2812 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
2813 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
2821 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
2822 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
2823 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
2902 struct vm_area_struct *vma = vmf->vma; in do_swap_page() local
2911 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) in do_swap_page()
2917 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
2925 ret = device_private_entry_fault(vma, vmf->address, entry, in do_swap_page()
2930 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
2938 page = lookup_swap_cache(entry, vma, vmf->address); in do_swap_page()
2947 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, in do_swap_page()
2967 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
2978 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
2989 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
3007 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3014 if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, in do_swap_page()
3023 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3043 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
3044 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
3045 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
3047 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in do_swap_page()
3052 flush_icache_page(vma, page); in do_swap_page()
3055 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
3056 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
3061 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_swap_page()
3063 lru_cache_add_active_or_unevictable(page, vma); in do_swap_page()
3065 do_page_add_anon_rmap(page, vma, vmf->address, exclusive); in do_swap_page()
3072 (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) in do_swap_page()
3096 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
3122 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page() local
3129 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
3142 if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)) in do_anonymous_page()
3151 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
3153 vma->vm_page_prot)); in do_anonymous_page()
3154 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
3158 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
3162 if (userfaultfd_missing(vma)) { in do_anonymous_page()
3170 if (unlikely(anon_vma_prepare(vma))) in do_anonymous_page()
3172 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
3176 if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg, in do_anonymous_page()
3187 entry = mk_pte(page, vma->vm_page_prot); in do_anonymous_page()
3188 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
3191 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
3196 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
3201 if (userfaultfd_missing(vma)) { in do_anonymous_page()
3208 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_anonymous_page()
3209 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_anonymous_page()
3211 lru_cache_add_active_or_unevictable(page, vma); in do_anonymous_page()
3213 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
3216 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3237 struct vm_area_struct *vma = vmf->vma; in __do_fault() local
3240 ret = vma->vm_ops->fault(vmf); in __do_fault()
3274 struct vm_area_struct *vma = vmf->vma; in pte_alloc_one_map() local
3279 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in pte_alloc_one_map()
3285 mm_inc_nr_ptes(vma->vm_mm); in pte_alloc_one_map()
3286 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in pte_alloc_one_map()
3289 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { in pte_alloc_one_map()
3316 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in pte_alloc_one_map()
3324 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, in transhuge_vma_suitable() argument
3327 if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != in transhuge_vma_suitable()
3328 (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) in transhuge_vma_suitable()
3330 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) in transhuge_vma_suitable()
3337 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte() local
3339 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
3344 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
3350 struct vm_area_struct *vma = vmf->vma; in do_set_pmd() local
3357 if (!transhuge_vma_suitable(vma, haddr)) in do_set_pmd()
3368 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address); in do_set_pmd()
3374 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
3379 flush_icache_page(vma, page + i); in do_set_pmd()
3381 entry = mk_huge_pmd(page, vma->vm_page_prot); in do_set_pmd()
3383 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_set_pmd()
3385 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
3393 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
3395 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
3429 struct vm_area_struct *vma = vmf->vma; in alloc_set_pte() local
3454 flush_icache_page(vma, page); in alloc_set_pte()
3455 entry = mk_pte(page, vma->vm_page_prot); in alloc_set_pte()
3457 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in alloc_set_pte()
3459 if (write && !(vma->vm_flags & VM_SHARED)) { in alloc_set_pte()
3460 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in alloc_set_pte()
3461 page_add_new_anon_rmap(page, vma, vmf->address, false); in alloc_set_pte()
3463 lru_cache_add_active_or_unevictable(page, vma); in alloc_set_pte()
3465 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); in alloc_set_pte()
3468 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in alloc_set_pte()
3471 update_mmu_cache(vma, vmf->address, vmf->pte); in alloc_set_pte()
3498 !(vmf->vma->vm_flags & VM_SHARED)) in finish_fault()
3507 if (!(vmf->vma->vm_flags & VM_SHARED)) in finish_fault()
3508 ret = check_stable_address_space(vmf->vma->vm_mm); in finish_fault()
3591 vmf->address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
3602 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, in do_fault_around()
3606 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, in do_fault_around()
3613 vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); in do_fault_around()
3638 struct vm_area_struct *vma = vmf->vma; in do_read_fault() local
3646 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { in do_read_fault()
3665 struct vm_area_struct *vma = vmf->vma; in do_cow_fault() local
3668 if (unlikely(anon_vma_prepare(vma))) in do_cow_fault()
3671 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
3675 if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL, in do_cow_fault()
3687 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
3704 struct vm_area_struct *vma = vmf->vma; in do_shared_fault() local
3715 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
3733 fault_dirty_shared_page(vma, vmf->page); in do_shared_fault()
3745 struct vm_area_struct *vma = vmf->vma; in do_fault() local
3749 if (!vma->vm_ops->fault) in do_fault()
3753 else if (!(vma->vm_flags & VM_SHARED)) in do_fault()
3760 pte_free(vma->vm_mm, vmf->prealloc_pte); in do_fault()
3766 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, in numa_migrate_prep() argument
3778 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
3783 struct vm_area_struct *vma = vmf->vma; in do_numa_page() local
3798 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
3809 pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte); in do_numa_page()
3810 pte = pte_modify(pte, vma->vm_page_prot); in do_numa_page()
3814 ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte); in do_numa_page()
3815 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
3817 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
3844 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) in do_numa_page()
3849 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
3858 migrated = migrate_misplaced_page(page, vma, target_nid); in do_numa_page()
3873 if (vma_is_anonymous(vmf->vma)) in create_huge_pmd()
3875 if (vmf->vma->vm_ops->huge_fault) in create_huge_pmd()
3876 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in create_huge_pmd()
3883 if (vma_is_anonymous(vmf->vma)) in wp_huge_pmd()
3885 if (vmf->vma->vm_ops->huge_fault) in wp_huge_pmd()
3886 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in wp_huge_pmd()
3889 VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); in wp_huge_pmd()
3890 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
3895 static inline bool vma_is_accessible(struct vm_area_struct *vma) in vma_is_accessible() argument
3897 return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE); in vma_is_accessible()
3904 if (vma_is_anonymous(vmf->vma)) in create_huge_pud()
3906 if (vmf->vma->vm_ops->huge_fault) in create_huge_pud()
3907 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in create_huge_pud()
3916 if (vma_is_anonymous(vmf->vma)) in wp_huge_pud()
3918 if (vmf->vma->vm_ops->huge_fault) in wp_huge_pud()
3919 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in wp_huge_pud()
3980 if (vma_is_anonymous(vmf->vma)) in handle_pte_fault()
3989 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
3992 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
4003 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
4005 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4014 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
4027 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, in __handle_mm_fault() argument
4031 .vma = vma, in __handle_mm_fault()
4034 .pgoff = linear_page_index(vma, address), in __handle_mm_fault()
4035 .gfp_mask = __get_fault_gfp_mask(vma), in __handle_mm_fault()
4038 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
4051 if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4077 if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4093 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
4116 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
4124 count_memcg_event_mm(vma->vm_mm, PGFAULT); in handle_mm_fault()
4129 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, in handle_mm_fault()
4141 if (unlikely(is_vm_hugetlb_page(vma))) in handle_mm_fault()
4142 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
4144 ret = __handle_mm_fault(vma, address, flags); in handle_mm_fault()
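
handle_mm_fault(), listed above, is the entry point architecture page-fault handlers call once they have found the faulting vma. A heavily simplified sketch of that calling convention (mmap_sem-era, matching this listing); stack expansion, VM_FAULT_RETRY handling and signal delivery are omitted, and example_do_page_fault is a hypothetical name:

static void example_do_page_fault(struct mm_struct *mm, unsigned long address,
				  unsigned int flags)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (vma && vma->vm_start <= address)
		fault = handle_mm_fault(vma, address, flags);
	else
		fault = VM_FAULT_SIGSEGV;
	up_read(&mm->mmap_sem);

	if (fault & VM_FAULT_ERROR) {
		/* deliver SIGSEGV/SIGBUS or invoke the OOM killer here */
	}
}
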
4351 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
4358 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
4361 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
4371 int follow_phys(struct vm_area_struct *vma, in follow_phys() argument
4379 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_phys()
4382 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
4399 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
4407 if (follow_phys(vma, addr, write, &prot, &phys_addr)) in generic_access_phys()
4432 struct vm_area_struct *vma; in __access_remote_vm() local
4444 gup_flags, &page, &vma, NULL); in __access_remote_vm()
4453 vma = find_vma(mm, addr); in __access_remote_vm()
4454 if (!vma || vma->vm_start > addr) in __access_remote_vm()
4456 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
4457 ret = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
4471 copy_to_user_page(vma, page, addr, in __access_remote_vm()
4475 copy_from_user_page(vma, page, addr, in __access_remote_vm()
4535 struct vm_area_struct *vma; in print_vma_addr() local
4543 vma = find_vma(mm, ip); in print_vma_addr()
4544 if (vma && vma->vm_file) { in print_vma_addr()
4545 struct file *f = vma->vm_file; in print_vma_addr()
4554 vma->vm_start, in print_vma_addr()
4555 vma->vm_end - vma->vm_start); in print_vma_addr()
4674 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
4683 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_user_gigantic_page()
4694 struct vm_area_struct *vma; member
4702 addr, copy_arg->vma); in copy_subpage()
4706 unsigned long addr_hint, struct vm_area_struct *vma, in copy_user_huge_page() argument
4714 .vma = vma, in copy_user_huge_page()
4718 copy_user_gigantic_page(dst, src, addr, vma, in copy_user_huge_page()