Lines matching refs: vmf
2380 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) in do_page_mkwrite() argument
2383 struct page *page = vmf->page; in do_page_mkwrite()
2384 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
2386 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
2388 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2390 vmf->flags = old_flags; in do_page_mkwrite()
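
do_page_mkwrite() saves vmf->flags, forwards the fault to the vma's ->page_mkwrite hook with FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE set, and restores the flags afterwards. A minimal sketch of such a hook for a hypothetical filesystem follows; the myfs_* names are invented, but the sb_start_pagefault()/file_update_time()/truncate-check pattern is the usual convention:

	#include <linux/mm.h>
	#include <linux/fs.h>
	#include <linux/pagemap.h>

	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		struct page *page = vmf->page;
		struct inode *inode = file_inode(vmf->vma->vm_file);

		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);

		lock_page(page);
		if (page->mapping != inode->i_mapping) {
			/* Raced with truncate: tell the caller to retry. */
			unlock_page(page);
			sb_end_pagefault(inode->i_sb);
			return VM_FAULT_NOPAGE;
		}

		set_page_dirty(page);
		sb_end_pagefault(inode->i_sb);
		/* Page stays locked; do_page_mkwrite() checks for this. */
		return VM_FAULT_LOCKED;
	}
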
2448 static inline void wp_page_reuse(struct vm_fault *vmf) in wp_page_reuse() argument
2449 __releases(vmf->ptl) in wp_page_reuse()
2451 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
2452 struct page *page = vmf->page; in wp_page_reuse()
2462 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
2463 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
2465 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
2466 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
2467 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
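
Only lines referencing vmf are shown, which hides the pte construction in the middle of wp_page_reuse(). Quoted from memory of the v4.19 source, so treat it as a sketch, the full sequence is:

	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
	entry = pte_mkyoung(vmf->orig_pte);
	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
		update_mmu_cache(vma, vmf->address, vmf->pte);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
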
2486 static vm_fault_t wp_page_copy(struct vm_fault *vmf) in wp_page_copy() argument
2488 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
2490 struct page *old_page = vmf->page; in wp_page_copy()
2494 const unsigned long mmun_start = vmf->address & PAGE_MASK; in wp_page_copy()
2501 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { in wp_page_copy()
2503 vmf->address); in wp_page_copy()
2508 vmf->address); in wp_page_copy()
2511 cow_user_page(new_page, old_page, vmf->address, vma); in wp_page_copy()
2524 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
2525 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { in wp_page_copy()
2535 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
2544 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
2545 page_add_new_anon_rmap(new_page, vma, vmf->address, false); in wp_page_copy()
2553 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
2554 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
2591 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
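
The copy-on-write core of wp_page_copy() is spread over many elided lines. A condensed sketch, paraphrased from the v4.19 code; error paths, memcg charging and the mmu_notifier invalidate start/end calls are omitted, so this is not the complete function:

	static vm_fault_t wp_page_copy_sketch(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		struct mm_struct *mm = vma->vm_mm;
		struct page *old_page = vmf->page, *new_page;
		pte_t entry;

		if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
			new_page = alloc_zeroed_user_highpage_movable(vma,
								vmf->address);
		} else {
			new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
						  vmf->address);
			cow_user_page(new_page, old_page, vmf->address, vma);
		}
		__SetPageUptodate(new_page);

		/* Retake the ptl and make sure nobody changed the pte. */
		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address,
					       &vmf->ptl);
		if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
			flush_cache_page(vma, vmf->address,
					 pte_pfn(vmf->orig_pte));
			entry = mk_pte(new_page, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
			page_add_new_anon_rmap(new_page, vma, vmf->address,
					       false);
			set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
			update_mmu_cache(vma, vmf->address, vmf->pte);
		}
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return VM_FAULT_WRITE;
	}
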
2634 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) in finish_mkwrite_fault() argument
2636 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
2637 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
2638 &vmf->ptl); in finish_mkwrite_fault()
2643 if (!pte_same(*vmf->pte, vmf->orig_pte)) { in finish_mkwrite_fault()
2644 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
2647 wp_page_reuse(vmf); in finish_mkwrite_fault()
2655 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) in wp_pfn_shared() argument
2657 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
2662 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
2663 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
2664 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
2667 return finish_mkwrite_fault(vmf); in wp_pfn_shared()
2669 wp_page_reuse(vmf); in wp_pfn_shared()
2673 static vm_fault_t wp_page_shared(struct vm_fault *vmf) in wp_page_shared() argument
2674 __releases(vmf->ptl) in wp_page_shared()
2676 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
2678 get_page(vmf->page); in wp_page_shared()
2683 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
2684 tmp = do_page_mkwrite(vmf); in wp_page_shared()
2687 put_page(vmf->page); in wp_page_shared()
2690 tmp = finish_mkwrite_fault(vmf); in wp_page_shared()
2692 unlock_page(vmf->page); in wp_page_shared()
2693 put_page(vmf->page); in wp_page_shared()
2697 wp_page_reuse(vmf); in wp_page_shared()
2698 lock_page(vmf->page); in wp_page_shared()
2700 fault_dirty_shared_page(vma, vmf->page); in wp_page_shared()
2701 put_page(vmf->page); in wp_page_shared()
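
wp_page_shared() handles a write fault on a shared, file-backed page. Its two branches, paraphrased from the v4.19 source (error handling trimmed):

	get_page(vmf->page);
	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		tmp = do_page_mkwrite(vmf);	  /* let the fs prepare it */
		/* on error/NOPAGE: put_page() and bail out */
		tmp = finish_mkwrite_fault(vmf);  /* retake ptl, revalidate,
						     then wp_page_reuse() */
	} else {
		wp_page_reuse(vmf);		  /* no hook: reuse in place */
		lock_page(vmf->page);
	}
	fault_dirty_shared_page(vma, vmf->page);  /* dirty + wb balancing */
	put_page(vmf->page);
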
2724 static vm_fault_t do_wp_page(struct vm_fault *vmf) in do_wp_page() argument
2725 __releases(vmf->ptl) in do_wp_page()
2727 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
2729 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
2730 if (!vmf->page) { in do_wp_page()
2740 return wp_pfn_shared(vmf); in do_wp_page()
2742 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
2743 return wp_page_copy(vmf); in do_wp_page()
2750 if (PageAnon(vmf->page) && !PageKsm(vmf->page)) { in do_wp_page()
2752 if (!trylock_page(vmf->page)) { in do_wp_page()
2753 get_page(vmf->page); in do_wp_page()
2754 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
2755 lock_page(vmf->page); in do_wp_page()
2756 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_wp_page()
2757 vmf->address, &vmf->ptl); in do_wp_page()
2758 if (!pte_same(*vmf->pte, vmf->orig_pte)) { in do_wp_page()
2759 unlock_page(vmf->page); in do_wp_page()
2760 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
2761 put_page(vmf->page); in do_wp_page()
2764 put_page(vmf->page); in do_wp_page()
2766 if (reuse_swap_page(vmf->page, &total_map_swapcount)) { in do_wp_page()
2775 page_move_anon_rmap(vmf->page, vma); in do_wp_page()
2777 unlock_page(vmf->page); in do_wp_page()
2778 wp_page_reuse(vmf); in do_wp_page()
2781 unlock_page(vmf->page); in do_wp_page()
2784 return wp_page_shared(vmf); in do_wp_page()
2790 get_page(vmf->page); in do_wp_page()
2792 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
2793 return wp_page_copy(vmf); in do_wp_page()
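
Putting do_wp_page() together, the decision tree behind the calls listed above is:

	/*
	 * pte has no struct page (VM_PFNMAP/VM_MIXEDMAP special mapping):
	 *     shared+writable vma -> wp_pfn_shared()   (driver pfn_mkwrite)
	 *     otherwise           -> wp_page_copy()    (COW the pfn)
	 * anonymous page, not KSM:
	 *     sole user per reuse_swap_page() -> wp_page_reuse()
	 *     otherwise                        -> wp_page_copy()
	 * shared+writable file page            -> wp_page_shared()
	 * everything else (e.g. private file)  -> wp_page_copy()
	 */
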
2900 vm_fault_t do_swap_page(struct vm_fault *vmf) in do_swap_page() argument
2902 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
2911 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) in do_swap_page()
2914 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
2917 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
2918 vmf->address); in do_swap_page()
2925 ret = device_private_entry_fault(vma, vmf->address, entry, in do_swap_page()
2926 vmf->flags, vmf->pmd); in do_swap_page()
2930 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
2938 page = lookup_swap_cache(entry, vma, vmf->address); in do_swap_page()
2948 vmf->address); in do_swap_page()
2958 vmf); in do_swap_page()
2967 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
2968 vmf->address, &vmf->ptl); in do_swap_page()
2969 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
2989 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
3007 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3023 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3024 &vmf->ptl); in do_swap_page()
3025 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
3046 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { in do_swap_page()
3048 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
3053 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
3055 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
3056 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
3057 vmf->orig_pte = pte; in do_swap_page()
3061 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_swap_page()
3065 do_page_add_anon_rmap(page, vma, vmf->address, exclusive); in do_swap_page()
3088 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
3089 ret |= do_wp_page(vmf); in do_swap_page()
3096 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
3098 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
3103 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
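
The shape of do_swap_page(), step by step:

	/*
	 * 1. entry = pte_to_swp_entry(vmf->orig_pte); migration entries
	 *    wait in migration_entry_wait(), device-private entries go to
	 *    device_private_entry_fault(), bad entries to print_bad_pte().
	 * 2. lookup_swap_cache(); on a miss, read the page in (with
	 *    readahead) and charge it.
	 * 3. lock_page_or_retry(), retake the ptl via pte_offset_map_lock()
	 *    and recheck pte_same(*vmf->pte, vmf->orig_pte).
	 * 4. Build the pte with mk_pte(), carry soft-dirty over from the
	 *    swap pte, set_pte_at(), add the page to the anon rmap.
	 * 5. On a write fault, either map it writable right away when
	 *    reuse_swap_page() says we own the page, or fall through to
	 *    do_wp_page() afterwards.
	 */
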
3120 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) in do_anonymous_page() argument
3122 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
3142 if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)) in do_anonymous_page()
3146 if (unlikely(pmd_trans_unstable(vmf->pmd))) in do_anonymous_page()
3150 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
3152 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
3154 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
3155 vmf->address, &vmf->ptl); in do_anonymous_page()
3156 if (!pte_none(*vmf->pte)) in do_anonymous_page()
3163 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3164 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3172 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
3191 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
3192 &vmf->ptl); in do_anonymous_page()
3193 if (!pte_none(*vmf->pte)) in do_anonymous_page()
3202 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3205 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3209 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_anonymous_page()
3213 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
3216 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3218 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
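
do_anonymous_page() has two outcomes worth keeping in mind when reading the lines above:

	/*
	 * read fault  -> map the global zero page read-only:
	 *                pte_mkspecial(pfn_pte(my_zero_pfn(addr), prot));
	 *                no page is allocated at all.
	 * write fault -> alloc_zeroed_user_highpage_movable(), make the
	 *                pte writable, page_add_new_anon_rmap().
	 * Both paths recheck pte_none(*vmf->pte) under the ptl, and
	 * userfaultfd-armed vmas divert to handle_userfault(VM_UFFD_MISSING).
	 */
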
3235 static vm_fault_t __do_fault(struct vm_fault *vmf) in __do_fault() argument
3237 struct vm_area_struct *vma = vmf->vma; in __do_fault()
3240 ret = vma->vm_ops->fault(vmf); in __do_fault()
3245 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
3247 unlock_page(vmf->page); in __do_fault()
3248 put_page(vmf->page); in __do_fault()
3249 vmf->page = NULL; in __do_fault()
3254 lock_page(vmf->page); in __do_fault()
3256 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); in __do_fault()
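
__do_fault() is the wrapper around the vma's ->fault hook. A minimal sketch of such a hook for a hypothetical driver that maps a preallocated page array; the mydrv_* names are invented:

	#include <linux/mm.h>

	struct mydrv_buf {
		struct page **pages;
		unsigned long nr_pages;
	};

	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
	{
		struct mydrv_buf *buf = vmf->vma->vm_private_data;

		if (vmf->pgoff >= buf->nr_pages)
			return VM_FAULT_SIGBUS;

		/* Hand back a referenced page.  Since we do not return
		 * VM_FAULT_LOCKED, __do_fault() locks it for us. */
		vmf->page = buf->pages[vmf->pgoff];
		get_page(vmf->page);
		return 0;
	}
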
3272 static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf) in pte_alloc_one_map() argument
3274 struct vm_area_struct *vma = vmf->vma; in pte_alloc_one_map()
3276 if (!pmd_none(*vmf->pmd)) in pte_alloc_one_map()
3278 if (vmf->prealloc_pte) { in pte_alloc_one_map()
3279 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in pte_alloc_one_map()
3280 if (unlikely(!pmd_none(*vmf->pmd))) { in pte_alloc_one_map()
3281 spin_unlock(vmf->ptl); in pte_alloc_one_map()
3286 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in pte_alloc_one_map()
3287 spin_unlock(vmf->ptl); in pte_alloc_one_map()
3288 vmf->prealloc_pte = NULL; in pte_alloc_one_map()
3289 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { in pte_alloc_one_map()
3304 if (pmd_devmap_trans_unstable(vmf->pmd)) in pte_alloc_one_map()
3316 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in pte_alloc_one_map()
3317 &vmf->ptl); in pte_alloc_one_map()
3335 static void deposit_prealloc_pte(struct vm_fault *vmf) in deposit_prealloc_pte() argument
3337 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
3339 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
3345 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
3348 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
3350 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
3351 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
3352 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
3367 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
3368 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address); in do_set_pmd()
3369 if (!vmf->prealloc_pte) in do_set_pmd()
3374 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
3375 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
3391 deposit_prealloc_pte(vmf); in do_set_pmd()
3393 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
3395 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
3401 spin_unlock(vmf->ptl); in do_set_pmd()
3405 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
3426 vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, in alloc_set_pte() argument
3429 struct vm_area_struct *vma = vmf->vma; in alloc_set_pte()
3430 bool write = vmf->flags & FAULT_FLAG_WRITE; in alloc_set_pte()
3434 if (pmd_none(*vmf->pmd) && PageTransCompound(page) && in alloc_set_pte()
3439 ret = do_set_pmd(vmf, page); in alloc_set_pte()
3444 if (!vmf->pte) { in alloc_set_pte()
3445 ret = pte_alloc_one_map(vmf); in alloc_set_pte()
3451 if (unlikely(!pte_none(*vmf->pte))) in alloc_set_pte()
3461 page_add_new_anon_rmap(page, vma, vmf->address, false); in alloc_set_pte()
3468 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in alloc_set_pte()
3471 update_mmu_cache(vma, vmf->address, vmf->pte); in alloc_set_pte()
3491 vm_fault_t finish_fault(struct vm_fault *vmf) in finish_fault() argument
3497 if ((vmf->flags & FAULT_FLAG_WRITE) && in finish_fault()
3498 !(vmf->vma->vm_flags & VM_SHARED)) in finish_fault()
3499 page = vmf->cow_page; in finish_fault()
3501 page = vmf->page; in finish_fault()
3507 if (!(vmf->vma->vm_flags & VM_SHARED)) in finish_fault()
3508 ret = check_stable_address_space(vmf->vma->vm_mm); in finish_fault()
3510 ret = alloc_set_pte(vmf, vmf->memcg, page); in finish_fault()
3511 if (vmf->pte) in finish_fault()
3512 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
3580 static vm_fault_t do_fault_around(struct vm_fault *vmf) in do_fault_around() argument
3582 unsigned long address = vmf->address, nr_pages, mask; in do_fault_around()
3583 pgoff_t start_pgoff = vmf->pgoff; in do_fault_around()
3591 vmf->address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
3592 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
3600 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + in do_fault_around()
3602 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, in do_fault_around()
3605 if (pmd_none(*vmf->pmd)) { in do_fault_around()
3606 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, in do_fault_around()
3607 vmf->address); in do_fault_around()
3608 if (!vmf->prealloc_pte) in do_fault_around()
3613 vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); in do_fault_around()
3616 if (pmd_trans_huge(*vmf->pmd)) { in do_fault_around()
3622 if (!vmf->pte) in do_fault_around()
3626 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); in do_fault_around()
3627 if (!pte_none(*vmf->pte)) in do_fault_around()
3629 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault_around()
3631 vmf->address = address; in do_fault_around()
3632 vmf->pte = NULL; in do_fault_around()
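
The window arithmetic in do_fault_around() is easiest to see with numbers. A worked example assuming 4 KiB pages and the default fault_around_bytes of 65536 (16 pages):

	/*
	 * nr_pages = 65536 >> PAGE_SHIFT        = 16
	 * mask     = ~(16 * PAGE_SIZE - 1)      = ~0xffff
	 *
	 * Fault at 0x7f123456a123:
	 *   vmf->address = 0x7f123456a123 & ~0xffff = 0x7f1234560000
	 *   (clamped up to vma->vm_start if the vma begins mid-window)
	 *
	 * start_pgoff backs up by the same number of pages; end_pgoff is
	 * clamped three ways (min3): end of the pte table, end of the vma,
	 * start_pgoff + 16 - 1.  ->map_pages() then fills only ptes whose
	 * pages are already in the page cache; vmf->address and vmf->pte
	 * are restored before returning.
	 */
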
3636 static vm_fault_t do_read_fault(struct vm_fault *vmf) in do_read_fault() argument
3638 struct vm_area_struct *vma = vmf->vma; in do_read_fault()
3647 ret = do_fault_around(vmf); in do_read_fault()
3652 ret = __do_fault(vmf); in do_read_fault()
3656 ret |= finish_fault(vmf); in do_read_fault()
3657 unlock_page(vmf->page); in do_read_fault()
3659 put_page(vmf->page); in do_read_fault()
3663 static vm_fault_t do_cow_fault(struct vm_fault *vmf) in do_cow_fault() argument
3665 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
3671 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
3672 if (!vmf->cow_page) in do_cow_fault()
3675 if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL, in do_cow_fault()
3676 &vmf->memcg, false)) { in do_cow_fault()
3677 put_page(vmf->cow_page); in do_cow_fault()
3681 ret = __do_fault(vmf); in do_cow_fault()
3687 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
3688 __SetPageUptodate(vmf->cow_page); in do_cow_fault()
3690 ret |= finish_fault(vmf); in do_cow_fault()
3691 unlock_page(vmf->page); in do_cow_fault()
3692 put_page(vmf->page); in do_cow_fault()
3697 mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false); in do_cow_fault()
3698 put_page(vmf->cow_page); in do_cow_fault()
3702 static vm_fault_t do_shared_fault(struct vm_fault *vmf) in do_shared_fault() argument
3704 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
3707 ret = __do_fault(vmf); in do_shared_fault()
3716 unlock_page(vmf->page); in do_shared_fault()
3717 tmp = do_page_mkwrite(vmf); in do_shared_fault()
3720 put_page(vmf->page); in do_shared_fault()
3725 ret |= finish_fault(vmf); in do_shared_fault()
3728 unlock_page(vmf->page); in do_shared_fault()
3729 put_page(vmf->page); in do_shared_fault()
3733 fault_dirty_shared_page(vma, vmf->page); in do_shared_fault()
3743 static vm_fault_t do_fault(struct vm_fault *vmf) in do_fault() argument
3745 struct vm_area_struct *vma = vmf->vma; in do_fault()
3751 else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
3752 ret = do_read_fault(vmf); in do_fault()
3754 ret = do_cow_fault(vmf); in do_fault()
3756 ret = do_shared_fault(vmf); in do_fault()
3759 if (vmf->prealloc_pte) { in do_fault()
3760 pte_free(vma->vm_mm, vmf->prealloc_pte); in do_fault()
3761 vmf->prealloc_pte = NULL; in do_fault()
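
The dispatch in do_fault(), with the VM_SHARED test restored (it contains no vmf reference and is therefore hidden above):

	if (!vma->vm_ops->fault)
		ret = VM_FAULT_SIGBUS;		/* simplified; the real code
						   revalidates the pte first */
	else if (!(vmf->flags & FAULT_FLAG_WRITE))
		ret = do_read_fault(vmf);	/* read: map page-cache page */
	else if (!(vma->vm_flags & VM_SHARED))
		ret = do_cow_fault(vmf);	/* private write: copy first */
	else
		ret = do_shared_fault(vmf);	/* shared write: page_mkwrite */
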
3781 static vm_fault_t do_numa_page(struct vm_fault *vmf) in do_numa_page() argument
3783 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
3790 bool was_writable = pte_savedwrite(vmf->orig_pte); in do_numa_page()
3798 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
3799 spin_lock(vmf->ptl); in do_numa_page()
3800 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { in do_numa_page()
3801 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
3809 pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte); in do_numa_page()
3814 ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte); in do_numa_page()
3815 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
3817 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
3819 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
3825 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
3849 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
3851 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
3871 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) in create_huge_pmd() argument
3873 if (vma_is_anonymous(vmf->vma)) in create_huge_pmd()
3874 return do_huge_pmd_anonymous_page(vmf); in create_huge_pmd()
3875 if (vmf->vma->vm_ops->huge_fault) in create_huge_pmd()
3876 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in create_huge_pmd()
3881 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) in wp_huge_pmd() argument
3883 if (vma_is_anonymous(vmf->vma)) in wp_huge_pmd()
3884 return do_huge_pmd_wp_page(vmf, orig_pmd); in wp_huge_pmd()
3885 if (vmf->vma->vm_ops->huge_fault) in wp_huge_pmd()
3886 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in wp_huge_pmd()
3889 VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); in wp_huge_pmd()
3890 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
3900 static vm_fault_t create_huge_pud(struct vm_fault *vmf) in create_huge_pud() argument
3904 if (vma_is_anonymous(vmf->vma)) in create_huge_pud()
3906 if (vmf->vma->vm_ops->huge_fault) in create_huge_pud()
3907 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in create_huge_pud()
3912 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) in wp_huge_pud() argument
3916 if (vma_is_anonymous(vmf->vma)) in wp_huge_pud()
3918 if (vmf->vma->vm_ops->huge_fault) in wp_huge_pud()
3919 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in wp_huge_pud()
3939 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) in handle_pte_fault() argument
3943 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
3950 vmf->pte = NULL; in handle_pte_fault()
3953 if (pmd_devmap_trans_unstable(vmf->pmd)) in handle_pte_fault()
3961 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in handle_pte_fault()
3962 vmf->orig_pte = *vmf->pte; in handle_pte_fault()
3973 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
3974 pte_unmap(vmf->pte); in handle_pte_fault()
3975 vmf->pte = NULL; in handle_pte_fault()
3979 if (!vmf->pte) { in handle_pte_fault()
3980 if (vma_is_anonymous(vmf->vma)) in handle_pte_fault()
3981 return do_anonymous_page(vmf); in handle_pte_fault()
3983 return do_fault(vmf); in handle_pte_fault()
3986 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
3987 return do_swap_page(vmf); in handle_pte_fault()
3989 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
3990 return do_numa_page(vmf); in handle_pte_fault()
3992 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
3993 spin_lock(vmf->ptl); in handle_pte_fault()
3994 entry = vmf->orig_pte; in handle_pte_fault()
3995 if (unlikely(!pte_same(*vmf->pte, entry))) in handle_pte_fault()
3997 if (vmf->flags & FAULT_FLAG_WRITE) { in handle_pte_fault()
3999 return do_wp_page(vmf); in handle_pte_fault()
4003 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
4004 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
4005 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4013 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
4014 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
4017 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
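
handle_pte_fault() is the fan-out point; its dispatch order, condensed:

	if (!vmf->pte)				/* no pte mapped here yet */
		return vma_is_anonymous(vmf->vma) ? do_anonymous_page(vmf)
						  : do_fault(vmf);
	if (!pte_present(vmf->orig_pte))	/* swap or migration entry */
		return do_swap_page(vmf);
	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
		return do_numa_page(vmf);	/* NUMA hinting fault */
	/* Present pte: a write to a read-only pte becomes do_wp_page();
	 * anything else just marks the pte young (and dirty for writes). */
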
4030 struct vm_fault vmf = { in __handle_mm_fault() local
4048 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
4049 if (!vmf.pud) in __handle_mm_fault()
4051 if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4052 ret = create_huge_pud(&vmf); in __handle_mm_fault()
4056 pud_t orig_pud = *vmf.pud; in __handle_mm_fault()
4064 ret = wp_huge_pud(&vmf, orig_pud); in __handle_mm_fault()
4068 huge_pud_set_accessed(&vmf, orig_pud); in __handle_mm_fault()
4074 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
4075 if (!vmf.pmd) in __handle_mm_fault()
4077 if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4078 ret = create_huge_pmd(&vmf); in __handle_mm_fault()
4082 pmd_t orig_pmd = *vmf.pmd; in __handle_mm_fault()
4089 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
4094 return do_huge_pmd_numa_page(&vmf, orig_pmd); in __handle_mm_fault()
4097 ret = wp_huge_pmd(&vmf, orig_pmd); in __handle_mm_fault()
4101 huge_pmd_set_accessed(&vmf, orig_pmd); in __handle_mm_fault()
4107 return handle_pte_fault(&vmf); in __handle_mm_fault()
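
Finally, __handle_mm_fault() builds the vmf on its stack and walks the page-table hierarchy top-down before anything pte-sized happens:

	/*
	 * pgd -> p4d -> pud: create_huge_pud()/wp_huge_pud() for
	 *                    THP-eligible pud-sized faults
	 *            -> pmd: create_huge_pmd(), do_huge_pmd_numa_page(),
	 *                    wp_huge_pmd(), or pmd_migration_entry_wait()
	 *                    for huge/migrating entries
	 *            -> pte: everything else funnels into
	 *                    handle_pte_fault(&vmf).
	 */
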