Lines Matching refs:vmf
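
These matches appear to come from the Linux kernel's page-fault paths in mm/memory.c; nearly every line dereferences a field of struct vm_fault. As an orientation aid, here is a trimmed sketch of just the fields the lines below touch; the authoritative definition lives in include/linux/mm.h and has more members (and wraps the first group in an anonymous const struct):

	/* Trimmed sketch of struct vm_fault, not the full definition. */
	struct vm_fault {
		struct vm_area_struct *vma;	/* target VMA */
		pgoff_t pgoff;			/* logical page offset within the VMA */
		unsigned long address;		/* faulting virtual address (page-masked) */
		enum fault_flag flags;		/* FAULT_FLAG_WRITE, _UNSHARE, _VMA_LOCK, ... */
		pmd_t *pmd;			/* PMD entry covering the address */
		pud_t *pud;			/* PUD entry covering the address */
		pte_t orig_pte;			/* PTE value sampled when the fault was taken */
		pmd_t orig_pmd;			/* a union with orig_pte in the real struct */
		struct page *cow_page;		/* page allocated for a private COW fault */
		struct page *page;		/* page returned by the ->fault() handler */
		pte_t *pte;			/* mapped PTE, NULL when no page table yet */
		spinlock_t *ptl;		/* page-table lock protecting ->pte */
		pgtable_t prealloc_pte;		/* pre-allocated PTE page table */
	};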

106 static vm_fault_t do_fault(struct vm_fault *vmf);
107 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
108 static bool vmf_pte_changed(struct vm_fault *vmf);
114 static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) in vmf_orig_pte_uffd_wp() argument
116 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) in vmf_orig_pte_uffd_wp()
119 return pte_marker_uffd_wp(vmf->orig_pte); in vmf_orig_pte_uffd_wp()
2784 static inline int pte_unmap_same(struct vm_fault *vmf) in pte_unmap_same() argument
2789 spin_lock(vmf->ptl); in pte_unmap_same()
2790 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); in pte_unmap_same()
2791 spin_unlock(vmf->ptl); in pte_unmap_same()
2794 pte_unmap(vmf->pte); in pte_unmap_same()
2795 vmf->pte = NULL; in pte_unmap_same()
2806 struct vm_fault *vmf) in __wp_page_copy_user() argument
2811 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user()
2813 unsigned long addr = vmf->address; in __wp_page_copy_user()
2836 vmf->pte = NULL; in __wp_page_copy_user()
2837 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { in __wp_page_copy_user()
2840 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
2841 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in __wp_page_copy_user()
2846 if (vmf->pte) in __wp_page_copy_user()
2847 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
2852 entry = pte_mkyoung(vmf->orig_pte); in __wp_page_copy_user()
2853 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
2854 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); in __wp_page_copy_user()
2864 if (vmf->pte) in __wp_page_copy_user()
2868 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
2869 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in __wp_page_copy_user()
2871 if (vmf->pte) in __wp_page_copy_user()
2872 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
2895 if (vmf->pte) in __wp_page_copy_user()
2896 pte_unmap_unlock(vmf->pte, vmf->ptl); in __wp_page_copy_user()
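
The __wp_page_copy_user() lines above show an idiom that repeats throughout this listing: map and lock the PTE, re-read it with ptep_get(), and compare against the value sampled at fault time (vmf->orig_pte) with pte_same(); if another thread changed or zapped the PTE in the meantime, flush the stale TLB hint and back out. A condensed sketch of that recheck (the bail-out label is a placeholder for caller-specific cleanup):

	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
		/* Lost the race: someone modified or removed the PTE. */
		if (vmf->pte)
			update_mmu_tlb(vma, addr, vmf->pte);
		goto bail;	/* placeholder: unlock and return/retry as the caller requires */
	}
	/* The PTE still matches vmf->orig_pte; safe to modify it while holding vmf->ptl. */
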
2923 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) in do_page_mkwrite() argument
2926 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
2928 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
2930 if (vmf->vma->vm_file && in do_page_mkwrite()
2931 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
2934 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2936 vmf->flags = old_flags; in do_page_mkwrite()
2956 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) in fault_dirty_shared_page() argument
2958 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
2960 struct folio *folio = page_folio(vmf->page); in fault_dirty_shared_page()
2990 fpin = maybe_unlock_mmap_for_io(vmf, NULL); in fault_dirty_shared_page()
3009 static inline void wp_page_reuse(struct vm_fault *vmf) in wp_page_reuse() argument
3010 __releases(vmf->ptl) in wp_page_reuse()
3012 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
3013 struct page *page = vmf->page; in wp_page_reuse()
3016 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); in wp_page_reuse()
3027 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3028 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
3030 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3031 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_reuse()
3032 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
3053 static vm_fault_t wp_page_copy(struct vm_fault *vmf) in wp_page_copy() argument
3055 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in wp_page_copy()
3056 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
3067 if (vmf->page) in wp_page_copy()
3068 old_folio = page_folio(vmf->page); in wp_page_copy()
3072 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { in wp_page_copy()
3073 new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); in wp_page_copy()
3078 vmf->address, false); in wp_page_copy()
3082 ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); in wp_page_copy()
3098 kmsan_copy_page_meta(&new_folio->page, vmf->page); in wp_page_copy()
3108 vmf->address & PAGE_MASK, in wp_page_copy()
3109 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
3115 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
3116 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in wp_page_copy()
3123 ksm_might_unmap_zero_page(mm, vmf->orig_pte); in wp_page_copy()
3126 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3130 if (pte_soft_dirty(vmf->orig_pte)) in wp_page_copy()
3132 if (pte_uffd_wp(vmf->orig_pte)) in wp_page_copy()
3145 ptep_clear_flush(vma, vmf->address, vmf->pte); in wp_page_copy()
3146 folio_add_new_anon_rmap(new_folio, vma, vmf->address); in wp_page_copy()
3154 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
3155 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_copy()
3179 page_remove_rmap(vmf->page, vma, false); in wp_page_copy()
3185 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
3186 } else if (vmf->pte) { in wp_page_copy()
3187 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3188 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
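
wp_page_copy() above is the copy-on-write slow path: allocate a new anonymous folio, copy (or zero-fill) its contents, then swap the PTE under the page-table lock inside an MMU-notifier invalidation window. A rough outline of the success path, assuming "entry" is the new writable PTE built for the new folio (error handling omitted):

	ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
	/* notifier range covers vmf->address & PAGE_MASK .. + PAGE_SIZE */

	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
		ptep_clear_flush(vma, vmf->address, vmf->pte);
		folio_add_new_anon_rmap(new_folio, vma, vmf->address);
		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
		update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
		if (old_folio)
			page_remove_rmap(vmf->page, vma, false);	/* unmap the old page */
	} else if (vmf->pte) {
		update_mmu_tlb(vma, vmf->address, vmf->pte);		/* raced: leave the PTE alone */
	}
	pte_unmap_unlock(vmf->pte, vmf->ptl);
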
3229 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) in finish_mkwrite_fault() argument
3231 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3232 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3233 &vmf->ptl); in finish_mkwrite_fault()
3234 if (!vmf->pte) in finish_mkwrite_fault()
3240 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { in finish_mkwrite_fault()
3241 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3242 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
3245 wp_page_reuse(vmf); in finish_mkwrite_fault()
3253 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) in wp_pfn_shared() argument
3255 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
3260 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
3261 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in wp_pfn_shared()
3262 vma_end_read(vmf->vma); in wp_pfn_shared()
3266 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
3267 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3270 return finish_mkwrite_fault(vmf); in wp_pfn_shared()
3272 wp_page_reuse(vmf); in wp_pfn_shared()
3276 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) in wp_page_shared() argument
3277 __releases(vmf->ptl) in wp_page_shared()
3279 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
3287 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
3288 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in wp_page_shared()
3290 vma_end_read(vmf->vma); in wp_page_shared()
3294 tmp = do_page_mkwrite(vmf, folio); in wp_page_shared()
3300 tmp = finish_mkwrite_fault(vmf); in wp_page_shared()
3307 wp_page_reuse(vmf); in wp_page_shared()
3310 ret |= fault_dirty_shared_page(vmf); in wp_page_shared()
3338 static vm_fault_t do_wp_page(struct vm_fault *vmf) in do_wp_page() argument
3339 __releases(vmf->ptl) in do_wp_page()
3341 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in do_wp_page()
3342 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
3346 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { in do_wp_page()
3347 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3348 return handle_userfault(vmf, VM_UFFD_WP); in do_wp_page()
3355 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3356 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3357 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3360 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3362 if (vmf->page) in do_wp_page()
3363 folio = page_folio(vmf->page); in do_wp_page()
3377 if (!vmf->page) in do_wp_page()
3378 return wp_pfn_shared(vmf); in do_wp_page()
3379 return wp_page_shared(vmf, folio); in do_wp_page()
3391 if (PageAnonExclusive(vmf->page)) in do_wp_page()
3424 page_move_anon_rmap(vmf->page, vma); in do_wp_page()
3428 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3431 wp_page_reuse(vmf); in do_wp_page()
3435 if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) { in do_wp_page()
3436 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3437 vma_end_read(vmf->vma); in do_wp_page()
3447 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3452 return wp_page_copy(vmf); in do_wp_page()
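
Taken together, the do_wp_page() lines above form the write-protect fault dispatcher. Compressed, with the tests that do not themselves mention vmf (and so are absent from this listing) written as placeholder conditions:

	if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return handle_userfault(vmf, VM_UFFD_WP);	/* let userspace resolve it */
	}

	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);

	if (vma_is_shared_writable) {			/* placeholder: VM_WRITE|VM_SHARED VMA */
		if (!vmf->page)
			return wp_pfn_shared(vmf);	/* pfn mapping: ->pfn_mkwrite() */
		return wp_page_shared(vmf, folio);	/* file page: ->page_mkwrite() + redirty */
	}

	if (anon_page_is_exclusive) {			/* placeholder: PageAnonExclusive() etc. */
		wp_page_reuse(vmf);			/* reuse the existing page in place */
		return 0;
	}

	return wp_page_copy(vmf);			/* otherwise, fall back to copy-on-write */
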
3586 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) in remove_device_exclusive_entry() argument
3588 struct folio *folio = page_folio(vmf->page); in remove_device_exclusive_entry()
3589 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry()
3604 ret = folio_lock_or_retry(folio, vmf); in remove_device_exclusive_entry()
3610 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3611 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); in remove_device_exclusive_entry()
3614 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3615 &vmf->ptl); in remove_device_exclusive_entry()
3616 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in remove_device_exclusive_entry()
3617 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); in remove_device_exclusive_entry()
3619 if (vmf->pte) in remove_device_exclusive_entry()
3620 pte_unmap_unlock(vmf->pte, vmf->ptl); in remove_device_exclusive_entry()
3647 static vm_fault_t pte_marker_clear(struct vm_fault *vmf) in pte_marker_clear() argument
3649 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
3650 vmf->address, &vmf->ptl); in pte_marker_clear()
3651 if (!vmf->pte) in pte_marker_clear()
3661 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) in pte_marker_clear()
3662 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
3663 pte_unmap_unlock(vmf->pte, vmf->ptl); in pte_marker_clear()
3667 static vm_fault_t do_pte_missing(struct vm_fault *vmf) in do_pte_missing() argument
3669 if (vma_is_anonymous(vmf->vma)) in do_pte_missing()
3670 return do_anonymous_page(vmf); in do_pte_missing()
3672 return do_fault(vmf); in do_pte_missing()
3679 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) in pte_marker_handle_uffd_wp() argument
3685 if (unlikely(!userfaultfd_wp(vmf->vma))) in pte_marker_handle_uffd_wp()
3686 return pte_marker_clear(vmf); in pte_marker_handle_uffd_wp()
3688 return do_pte_missing(vmf); in pte_marker_handle_uffd_wp()
3691 static vm_fault_t handle_pte_marker(struct vm_fault *vmf) in handle_pte_marker() argument
3693 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); in handle_pte_marker()
3708 return pte_marker_handle_uffd_wp(vmf); in handle_pte_marker()
3722 vm_fault_t do_swap_page(struct vm_fault *vmf) in do_swap_page() argument
3724 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
3735 if (!pte_unmap_same(vmf)) in do_swap_page()
3738 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
3741 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3742 vmf->address); in do_swap_page()
3744 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
3745 ret = remove_device_exclusive_entry(vmf); in do_swap_page()
3747 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in do_swap_page()
3757 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
3758 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3759 vmf->address, &vmf->ptl); in do_swap_page()
3760 if (unlikely(!vmf->pte || in do_swap_page()
3761 !pte_same(ptep_get(vmf->pte), in do_swap_page()
3762 vmf->orig_pte))) in do_swap_page()
3769 get_page(vmf->page); in do_swap_page()
3770 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
3771 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); in do_swap_page()
3772 put_page(vmf->page); in do_swap_page()
3776 ret = handle_pte_marker(vmf); in do_swap_page()
3778 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
3789 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
3799 vma, vmf->address, false); in do_swap_page()
3826 vmf); in do_swap_page()
3837 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3838 vmf->address, &vmf->ptl); in do_swap_page()
3839 if (likely(vmf->pte && in do_swap_page()
3840 pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in do_swap_page()
3858 ret |= folio_lock_or_retry(folio, vmf); in do_swap_page()
3879 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3895 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && in do_swap_page()
3905 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3906 &vmf->ptl); in do_swap_page()
3907 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in do_swap_page()
3931 exclusive = pte_swp_exclusive(vmf->orig_pte); in do_swap_page()
3975 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
3990 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
3992 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
3997 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
3999 if (pte_swp_uffd_wp(vmf->orig_pte)) in do_swap_page()
4001 vmf->orig_pte = pte; in do_swap_page()
4005 page_add_new_anon_rmap(page, vma, vmf->address); in do_swap_page()
4008 page_add_anon_rmap(page, vma, vmf->address, rmap_flags); in do_swap_page()
4013 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
4014 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
4030 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
4031 ret |= do_wp_page(vmf); in do_swap_page()
4038 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in do_swap_page()
4040 if (vmf->pte) in do_swap_page()
4041 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4047 if (vmf->pte) in do_swap_page()
4048 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
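
The do_swap_page() lines above span a long function. Heavily condensed, the flow they trace is (special swap-entry types, readahead and error paths elided):

	if (!pte_unmap_same(vmf))
		goto out;				/* raced with a concurrent fault */
	entry = pte_to_swp_entry(vmf->orig_pte);
	/* migration, device-exclusive/private and pte-marker entries are handled
	 * separately before any real swap-in is attempted */

	folio = swap_cache_get_folio(entry, vma, vmf->address);
	/* ...allocate a folio and read the page in if it is not in the swap cache... */
	ret |= folio_lock_or_retry(folio, vmf);

	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
		goto out_nomap;				/* PTE changed while we slept */

	/* build the present PTE, add the anon rmap, then install it */
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
	if (vmf->flags & FAULT_FLAG_WRITE) {
		ret |= do_wp_page(vmf);			/* may still need to break COW */
		goto out;
	}
	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
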
4067 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) in do_anonymous_page() argument
4069 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); in do_anonymous_page()
4070 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
4083 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4087 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
4089 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
4091 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4092 vmf->address, &vmf->ptl); in do_anonymous_page()
4093 if (!vmf->pte) in do_anonymous_page()
4095 if (vmf_pte_changed(vmf)) { in do_anonymous_page()
4096 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4104 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
4105 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
4113 folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); in do_anonymous_page()
4133 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
4134 &vmf->ptl); in do_anonymous_page()
4135 if (!vmf->pte) in do_anonymous_page()
4137 if (vmf_pte_changed(vmf)) { in do_anonymous_page()
4138 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4148 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
4150 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
4154 folio_add_new_anon_rmap(folio, vma, vmf->address); in do_anonymous_page()
4159 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
4162 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in do_anonymous_page()
4164 if (vmf->pte) in do_anonymous_page()
4165 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
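
do_anonymous_page() above has two paths: a plain read fault maps the shared zero page read-only, while everything else allocates a zeroed movable folio. Both paths re-take the PTE lock and use vmf_pte_changed() before installing anything, and both defer to userfaultfd when a MISSING handler is registered. Roughly (placeholder condition for zero-page eligibility):

	if (!(vmf->flags & FAULT_FLAG_WRITE) && zero_page_allowed) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), vma->vm_page_prot));
	} else {
		folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
		/* entry becomes a young, dirty, possibly writable PTE for the new folio */
	}

	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (!vmf->pte || vmf_pte_changed(vmf))
		goto release;					/* placeholder: free the folio, return */
	if (userfaultfd_missing(vma)) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return handle_userfault(vmf, VM_UFFD_MISSING);
	}
	folio_add_new_anon_rmap(folio, vma, vmf->address);	/* allocation path only */
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
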
4181 static vm_fault_t __do_fault(struct vm_fault *vmf) in __do_fault() argument
4183 struct vm_area_struct *vma = vmf->vma; in __do_fault()
4201 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
4202 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4203 if (!vmf->prealloc_pte) in __do_fault()
4207 ret = vma->vm_ops->fault(vmf); in __do_fault()
4212 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
4213 struct page *page = vmf->page; in __do_fault()
4225 vmf->page = NULL; in __do_fault()
4230 lock_page(vmf->page); in __do_fault()
4232 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); in __do_fault()
4238 static void deposit_prealloc_pte(struct vm_fault *vmf) in deposit_prealloc_pte() argument
4240 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
4242 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4248 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
4251 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
4253 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
4254 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
4255 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
4279 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
4280 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4281 if (!vmf->prealloc_pte) in do_set_pmd()
4285 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4286 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
4302 deposit_prealloc_pte(vmf); in do_set_pmd()
4304 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4306 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
4312 spin_unlock(vmf->ptl); in do_set_pmd()
4316 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
4330 void set_pte_range(struct vm_fault *vmf, struct folio *folio, in set_pte_range() argument
4333 struct vm_area_struct *vma = vmf->vma; in set_pte_range()
4334 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); in set_pte_range()
4335 bool write = vmf->flags & FAULT_FLAG_WRITE; in set_pte_range()
4336 bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE); in set_pte_range()
4361 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
4364 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); in set_pte_range()
4367 static bool vmf_pte_changed(struct vm_fault *vmf) in vmf_pte_changed() argument
4369 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) in vmf_pte_changed()
4370 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); in vmf_pte_changed()
4372 return !pte_none(ptep_get(vmf->pte)); in vmf_pte_changed()
4390 vm_fault_t finish_fault(struct vm_fault *vmf) in finish_fault() argument
4392 struct vm_area_struct *vma = vmf->vma; in finish_fault()
4397 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) in finish_fault()
4398 page = vmf->cow_page; in finish_fault()
4400 page = vmf->page; in finish_fault()
4412 if (pmd_none(*vmf->pmd)) { in finish_fault()
4414 ret = do_set_pmd(vmf, page); in finish_fault()
4419 if (vmf->prealloc_pte) in finish_fault()
4420 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
4421 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
4425 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
4426 vmf->address, &vmf->ptl); in finish_fault()
4427 if (!vmf->pte) in finish_fault()
4431 if (likely(!vmf_pte_changed(vmf))) { in finish_fault()
4434 set_pte_range(vmf, folio, page, 1, vmf->address); in finish_fault()
4437 update_mmu_tlb(vma, vmf->address, vmf->pte); in finish_fault()
4441 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
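
finish_fault() above installs the PTE for the page produced by __do_fault() (or for the private copy in vmf->cow_page on a write to a non-shared mapping), first making sure a page table exists under vmf->pmd. The THP attempt via do_set_pmd() is skipped in this sketch:

	page = is_private_write_fault ? vmf->cow_page : vmf->page;	/* placeholder condition */

	if (pmd_none(*vmf->pmd)) {
		if (vmf->prealloc_pte)
			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
			return VM_FAULT_OOM;
	}

	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (!vmf->pte)
		return VM_FAULT_NOPAGE;
	if (likely(!vmf_pte_changed(vmf))) {
		set_pte_range(vmf, folio, page, 1, vmf->address);	/* folio backing "page" */
		ret = 0;
	} else {
		update_mmu_tlb(vma, vmf->address, vmf->pte);
		ret = VM_FAULT_NOPAGE;
	}
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return ret;
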
4504 static vm_fault_t do_fault_around(struct vm_fault *vmf) in do_fault_around() argument
4507 pgoff_t pte_off = pte_index(vmf->address); in do_fault_around()
4509 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in do_fault_around()
4519 pte_off + vma_pages(vmf->vma) - vma_off) - 1; in do_fault_around()
4521 if (pmd_none(*vmf->pmd)) { in do_fault_around()
4522 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4523 if (!vmf->prealloc_pte) in do_fault_around()
4528 ret = vmf->vma->vm_ops->map_pages(vmf, in do_fault_around()
4529 vmf->pgoff + from_pte - pte_off, in do_fault_around()
4530 vmf->pgoff + to_pte - pte_off); in do_fault_around()
4537 static inline bool should_fault_around(struct vm_fault *vmf) in should_fault_around() argument
4540 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
4543 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
4550 static vm_fault_t do_read_fault(struct vm_fault *vmf) in do_read_fault() argument
4560 if (should_fault_around(vmf)) { in do_read_fault()
4561 ret = do_fault_around(vmf); in do_read_fault()
4566 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in do_read_fault()
4567 vma_end_read(vmf->vma); in do_read_fault()
4571 ret = __do_fault(vmf); in do_read_fault()
4575 ret |= finish_fault(vmf); in do_read_fault()
4576 folio = page_folio(vmf->page); in do_read_fault()
4583 static vm_fault_t do_cow_fault(struct vm_fault *vmf) in do_cow_fault() argument
4585 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
4588 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in do_cow_fault()
4596 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
4597 if (!vmf->cow_page) in do_cow_fault()
4600 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, in do_cow_fault()
4602 put_page(vmf->cow_page); in do_cow_fault()
4605 folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL); in do_cow_fault()
4607 ret = __do_fault(vmf); in do_cow_fault()
4613 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
4614 __SetPageUptodate(vmf->cow_page); in do_cow_fault()
4616 ret |= finish_fault(vmf); in do_cow_fault()
4617 unlock_page(vmf->page); in do_cow_fault()
4618 put_page(vmf->page); in do_cow_fault()
4623 put_page(vmf->cow_page); in do_cow_fault()
4627 static vm_fault_t do_shared_fault(struct vm_fault *vmf) in do_shared_fault() argument
4629 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
4633 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in do_shared_fault()
4638 ret = __do_fault(vmf); in do_shared_fault()
4642 folio = page_folio(vmf->page); in do_shared_fault()
4650 tmp = do_page_mkwrite(vmf, folio); in do_shared_fault()
4658 ret |= finish_fault(vmf); in do_shared_fault()
4666 ret |= fault_dirty_shared_page(vmf); in do_shared_fault()
4678 static vm_fault_t do_fault(struct vm_fault *vmf) in do_fault() argument
4680 struct vm_area_struct *vma = vmf->vma; in do_fault()
4688 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
4689 vmf->address, &vmf->ptl); in do_fault()
4690 if (unlikely(!vmf->pte)) in do_fault()
4700 if (unlikely(pte_none(ptep_get(vmf->pte)))) in do_fault()
4705 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault()
4707 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
4708 ret = do_read_fault(vmf); in do_fault()
4710 ret = do_cow_fault(vmf); in do_fault()
4712 ret = do_shared_fault(vmf); in do_fault()
4715 if (vmf->prealloc_pte) { in do_fault()
4716 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
4717 vmf->prealloc_pte = NULL; in do_fault()
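
do_fault() above routes file-backed faults to one of three handlers; the deciding VMA tests do not themselves reference vmf and so are missing from this listing:

	if (!vma->vm_ops->fault)
		ret = VM_FAULT_SIGBUS;			/* simplified: first checks vmf->pte for a stale mapping */
	else if (!(vmf->flags & FAULT_FLAG_WRITE))
		ret = do_read_fault(vmf);		/* read fault, possibly served by fault-around */
	else if (!(vma->vm_flags & VM_SHARED))
		ret = do_cow_fault(vmf);		/* private write: copy into vmf->cow_page */
	else
		ret = do_shared_fault(vmf);		/* shared write: ->page_mkwrite() + dirtying */

	/* a page table pre-allocated for fault-around may be left over */
	if (vmf->prealloc_pte) {
		pte_free(vm_mm, vmf->prealloc_pte);
		vmf->prealloc_pte = NULL;
	}
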
4739 static vm_fault_t do_numa_page(struct vm_fault *vmf) in do_numa_page() argument
4741 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
4755 spin_lock(vmf->ptl); in do_numa_page()
4756 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in do_numa_page()
4757 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4762 old_pte = ptep_get(vmf->pte); in do_numa_page()
4771 can_change_pte_writable(vma, vmf->address, pte)) in do_numa_page()
4774 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
4810 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
4816 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4825 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
4826 vmf->address, &vmf->ptl); in do_numa_page()
4827 if (unlikely(!vmf->pte)) in do_numa_page()
4829 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in do_numa_page()
4830 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4845 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
4850 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
4851 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in do_numa_page()
4852 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
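
do_numa_page() above handles NUMA-hinting faults (PROT_NONE PTEs installed by the NUMA balancer): it revalidates the PTE under the lock, decides whether the page should migrate to another node, and otherwise just restores a present PTE. In outline (migration details abbreviated in comments):

	spin_lock(vmf->ptl);
	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;					/* raced, nothing to do */
	}

	old_pte = ptep_get(vmf->pte);
	/* pte = old_pte with the VMA's normal protections restored */
	page = vm_normal_page(vma, vmf->address, pte);

	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, &flags);
	if (target_nid != NUMA_NO_NODE) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		/* try to migrate the page; on failure, re-map/lock the PTE and fall through */
	}

	/* make the PTE present again */
	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
	/* pte = young (and writable where allowed) version of old_pte */
	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
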
4856 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) in create_huge_pmd() argument
4858 struct vm_area_struct *vma = vmf->vma; in create_huge_pmd()
4860 return do_huge_pmd_anonymous_page(vmf); in create_huge_pmd()
4862 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
4867 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) in wp_huge_pmd() argument
4869 struct vm_area_struct *vma = vmf->vma; in wp_huge_pmd()
4870 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in wp_huge_pmd()
4875 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) in wp_huge_pmd()
4876 return handle_userfault(vmf, VM_UFFD_WP); in wp_huge_pmd()
4877 return do_huge_pmd_wp_page(vmf); in wp_huge_pmd()
4882 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
4889 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
4894 static vm_fault_t create_huge_pud(struct vm_fault *vmf) in create_huge_pud() argument
4898 struct vm_area_struct *vma = vmf->vma; in create_huge_pud()
4903 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); in create_huge_pud()
4908 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) in wp_huge_pud() argument
4912 struct vm_area_struct *vma = vmf->vma; in wp_huge_pud()
4920 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); in wp_huge_pud()
4927 __split_huge_pud(vma, vmf->pud, vmf->address); in wp_huge_pud()
4947 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) in handle_pte_fault() argument
4951 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
4958 vmf->pte = NULL; in handle_pte_fault()
4959 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; in handle_pte_fault()
4967 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
4968 vmf->address, &vmf->ptl); in handle_pte_fault()
4969 if (unlikely(!vmf->pte)) in handle_pte_fault()
4971 vmf->orig_pte = ptep_get_lockless(vmf->pte); in handle_pte_fault()
4972 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; in handle_pte_fault()
4974 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
4975 pte_unmap(vmf->pte); in handle_pte_fault()
4976 vmf->pte = NULL; in handle_pte_fault()
4980 if (!vmf->pte) in handle_pte_fault()
4981 return do_pte_missing(vmf); in handle_pte_fault()
4983 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
4984 return do_swap_page(vmf); in handle_pte_fault()
4986 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
4987 return do_numa_page(vmf); in handle_pte_fault()
4989 spin_lock(vmf->ptl); in handle_pte_fault()
4990 entry = vmf->orig_pte; in handle_pte_fault()
4991 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { in handle_pte_fault()
4992 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4995 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { in handle_pte_fault()
4997 return do_wp_page(vmf); in handle_pte_fault()
4998 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) in handle_pte_fault()
5002 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
5003 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
5004 update_mmu_cache_range(vmf, vmf->vma, vmf->address, in handle_pte_fault()
5005 vmf->pte, 1); in handle_pte_fault()
5008 if (vmf->flags & FAULT_FLAG_TRIED) in handle_pte_fault()
5016 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
5017 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, in handle_pte_fault()
5018 vmf->pte); in handle_pte_fault()
5021 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
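
handle_pte_fault() above is the per-PTE dispatcher: it samples the PTE locklessly into vmf->orig_pte and then picks a handler. Roughly:

	if (!vmf->pte)
		return do_pte_missing(vmf);		/* nothing mapped: anonymous or file fault */

	if (!pte_present(vmf->orig_pte))
		return do_swap_page(vmf);		/* swap, migration, device or marker entry */

	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
		return do_numa_page(vmf);		/* NUMA hinting fault */

	spin_lock(vmf->ptl);
	/* recheck pte_same() against vmf->orig_pte, then: */
	if (vmf->flags & (FAULT_FLAG_WRITE | FAULT_FLAG_UNSHARE))
		return do_wp_page(vmf);			/* only when the PTE is not already writable */
	/* otherwise set the accessed/dirty bits and fix up any spurious-fault TLB entry */
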
5034 struct vm_fault vmf = { in __handle_mm_fault() local
5053 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
5054 if (!vmf.pud) in __handle_mm_fault()
5057 if (pud_none(*vmf.pud) && in __handle_mm_fault()
5059 ret = create_huge_pud(&vmf); in __handle_mm_fault()
5063 pud_t orig_pud = *vmf.pud; in __handle_mm_fault()
5073 ret = wp_huge_pud(&vmf, orig_pud); in __handle_mm_fault()
5077 huge_pud_set_accessed(&vmf, orig_pud); in __handle_mm_fault()
5083 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
5084 if (!vmf.pmd) in __handle_mm_fault()
5088 if (pud_trans_unstable(vmf.pud)) in __handle_mm_fault()
5091 if (pmd_none(*vmf.pmd) && in __handle_mm_fault()
5093 ret = create_huge_pmd(&vmf); in __handle_mm_fault()
5097 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); in __handle_mm_fault()
5099 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { in __handle_mm_fault()
5101 !is_pmd_migration_entry(vmf.orig_pmd)); in __handle_mm_fault()
5102 if (is_pmd_migration_entry(vmf.orig_pmd)) in __handle_mm_fault()
5103 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
5106 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { in __handle_mm_fault()
5107 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
5108 return do_huge_pmd_numa_page(&vmf); in __handle_mm_fault()
5111 !pmd_write(vmf.orig_pmd)) { in __handle_mm_fault()
5112 ret = wp_huge_pmd(&vmf); in __handle_mm_fault()
5116 huge_pmd_set_accessed(&vmf); in __handle_mm_fault()
5122 return handle_pte_fault(&vmf); in __handle_mm_fault()
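
Finally, __handle_mm_fault() builds the vm_fault on the stack (the "local" match above) and walks the page tables top-down, giving the huge-page handlers a chance at the PUD and PMD levels before falling through to handle_pte_fault(). A sketch, with the non-vmf initialisers and THP-eligibility tests abbreviated in comments:

	struct vm_fault vmf = {
		.vma = vma,
		.address = address & PAGE_MASK,
		.flags = flags,
		/* .real_address, .pgoff and .gfp_mask are also set in the real code */
	};

	vmf.pud = pud_alloc(mm, p4d, address);
	/* pud_none() + THP allowed  -> create_huge_pud(); huge PUD write -> wp_huge_pud() */

	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
	/* pmd_none() + THP allowed  -> create_huge_pmd() */

	vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
	/* swap/migration PMD -> pmd_migration_entry_wait();
	 * huge PMD: NUMA -> do_huge_pmd_numa_page(), write -> wp_huge_pmd(),
	 * otherwise -> huge_pmd_set_accessed() */

	return handle_pte_fault(&vmf);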