Lines matching refs: vmf
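
The hits in this section are all from mm/memory.c's fault-handling path, and every one dereferences the same struct vm_fault descriptor that __handle_mm_fault() fills in and threads down to the leaf handlers. For orientation, a paraphrase of the structure from include/linux/mm.h of the same era (comments condensed; exact field comments and ordering vary slightly between releases):

	struct vm_fault {
		struct vm_area_struct *vma;	/* target VMA */
		unsigned int flags;		/* FAULT_FLAG_xxx flags */
		gfp_t gfp_mask;			/* gfp mask for allocations */
		pgoff_t pgoff;			/* logical page offset in vma */
		unsigned long address;		/* faulting virtual address */
		pmd_t *pmd;			/* pmd entry covering 'address' */
		pud_t *pud;			/* pud entry covering 'address' */
		pte_t orig_pte;			/* PTE value at fault time */
		struct page *cow_page;		/* page for private COW faults */
		struct page *page;		/* page returned by ->fault() */
		/* These are valid only while holding the ptl lock: */
		pte_t *pte;			/* pte entry covering 'address' */
		spinlock_t *ptl;		/* page table lock */
		pgtable_t prealloc_pte;		/* preallocated pte page table */
	};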

2577 struct vm_fault *vmf) in cow_user_page() argument
2583 struct vm_area_struct *vma = vmf->vma; in cow_user_page()
2585 unsigned long addr = vmf->address; in cow_user_page()
2605 if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) { in cow_user_page()
2608 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2610 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { in cow_user_page()
2615 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2620 entry = pte_mkyoung(vmf->orig_pte); in cow_user_page()
2621 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in cow_user_page()
2622 update_mmu_cache(vma, addr, vmf->pte); in cow_user_page()
2636 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2638 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { in cow_user_page()
2640 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2664 pte_unmap_unlock(vmf->pte, vmf->ptl); in cow_user_page()
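
The refs above trace cow_user_page() copying the faulting page for a private COW break. The interesting case is a source with no backing struct page: the copy must go through the user virtual address, which can itself fault, so on architectures whose hardware access faults on old PTEs the code first remaps the PTE, revalidates it against vmf->orig_pte under vmf->ptl, and marks it young. A condensed sketch of that path only (the __copy_from_user_inatomic() copy and its fallback are trimmed); mm, vma and addr are the locals shown above:

	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
		pte_t entry;

		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
			/* Another thread already handled the fault:
			 * update the local TLB and have the caller
			 * retry the whole fault. */
			update_mmu_tlb(vma, addr, vmf->pte);
			pte_unmap_unlock(vmf->pte, vmf->ptl);
			return false;
		}
		entry = pte_mkyoung(vmf->orig_pte);
		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
			update_mmu_cache(vma, addr, vmf->pte);
	}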
2691 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) in do_page_mkwrite() argument
2694 struct page *page = vmf->page; in do_page_mkwrite()
2695 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
2697 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
2699 if (vmf->vma->vm_file && in do_page_mkwrite()
2700 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
2703 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2705 vmf->flags = old_flags; in do_page_mkwrite()
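 
do_page_mkwrite() is a thin wrapper that notifies the filesystem a shared page is about to become writable. Note the flag dance around the ->page_mkwrite() callback and the early SIGBUS for files that have become swapfiles. A sketch close to the original (the post-call lock fixup is kept because callers rely on VM_FAULT_LOCKED):

	static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
	{
		vm_fault_t ret;
		struct page *page = vmf->page;
		unsigned int old_flags = vmf->flags;

		vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;

		if (vmf->vma->vm_file &&
		    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
			return VM_FAULT_SIGBUS;

		ret = vmf->vma->vm_ops->page_mkwrite(vmf);
		/* restore the original flags so the caller is not surprised */
		vmf->flags = old_flags;
		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
			return ret;
		if (unlikely(!(ret & VM_FAULT_LOCKED))) {
			lock_page(page);
			if (!page->mapping) {
				unlock_page(page);
				return 0;	/* page truncated: retry */
			}
			ret |= VM_FAULT_LOCKED;
		}
		return ret;
	}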
2725 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) in fault_dirty_shared_page() argument
2727 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
2729 struct page *page = vmf->page; in fault_dirty_shared_page()
2759 fpin = maybe_unlock_mmap_for_io(vmf, NULL); in fault_dirty_shared_page()
2778 static inline void wp_page_reuse(struct vm_fault *vmf) in wp_page_reuse() argument
2779 __releases(vmf->ptl) in wp_page_reuse()
2781 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
2782 struct page *page = vmf->page; in wp_page_reuse()
2792 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
2793 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
2795 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
2796 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
2797 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
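
wp_page_reuse() is the cheap COW outcome: the faulting task owns the page outright, so the PTE is rewritten as young, dirty, and (where the VMA allows) writable, with no copy. Sketch, with the cpupid bookkeeping trimmed:

	static inline void wp_page_reuse(struct vm_fault *vmf)
		__releases(vmf->ptl)
	{
		struct vm_area_struct *vma = vmf->vma;
		pte_t entry;

		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
		entry = pte_mkyoung(vmf->orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/* flush/update only if the PTE actually changed */
		if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
			update_mmu_cache(vma, vmf->address, vmf->pte);
		pte_unmap_unlock(vmf->pte, vmf->ptl);
	}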
2817 static vm_fault_t wp_page_copy(struct vm_fault *vmf) in wp_page_copy() argument
2819 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
2821 struct page *old_page = vmf->page; in wp_page_copy()
2830 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { in wp_page_copy()
2832 vmf->address); in wp_page_copy()
2837 vmf->address); in wp_page_copy()
2841 if (!cow_user_page(new_page, old_page, vmf)) { in wp_page_copy()
2862 vmf->address & PAGE_MASK, in wp_page_copy()
2863 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
2869 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
2870 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { in wp_page_copy()
2880 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
2890 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
2891 page_add_new_anon_rmap(new_page, vma, vmf->address, false); in wp_page_copy()
2898 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
2899 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
2930 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
2936 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
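
wp_page_copy() is the expensive outcome: allocate a fresh page (zero-page sources get a zeroed allocation), copy via cow_user_page(), then retake the PTE lock and revalidate with pte_same() before switching the mapping over inside an mmu_notifier range. Condensed control flow; memcg charging, counters, rmap teardown of the old page and the OOM paths are trimmed:

	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
		new_page = alloc_zeroed_user_highpage_movable(vma,
							      vmf->address);
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
					  vmf->address);
		if (new_page && !cow_user_page(new_page, old_page, vmf)) {
			/* source PTE changed mid-copy: drop the new page
			 * and let userspace re-fault */
			put_page(new_page);
			return 0;
		}
	}

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				vmf->address & PAGE_MASK,
				(vmf->address & PAGE_MASK) + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
		entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page,
					vma->vm_page_prot)), vma);
		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
		update_mmu_cache(vma, vmf->address, vmf->pte);
	} else {
		update_mmu_tlb(vma, vmf->address, vmf->pte);
	}
	pte_unmap_unlock(vmf->pte, vmf->ptl);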
2980 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) in finish_mkwrite_fault() argument
2982 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
2983 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
2984 &vmf->ptl); in finish_mkwrite_fault()
2989 if (!pte_same(*vmf->pte, vmf->orig_pte)) { in finish_mkwrite_fault()
2990 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
2991 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
2994 wp_page_reuse(vmf); in finish_mkwrite_fault()
3002 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) in wp_pfn_shared() argument
3004 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
3009 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
3010 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
3011 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3014 return finish_mkwrite_fault(vmf); in wp_pfn_shared()
3016 wp_page_reuse(vmf); in wp_pfn_shared()
3020 static vm_fault_t wp_page_shared(struct vm_fault *vmf) in wp_page_shared() argument
3021 __releases(vmf->ptl) in wp_page_shared()
3023 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
3026 get_page(vmf->page); in wp_page_shared()
3031 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
3032 tmp = do_page_mkwrite(vmf); in wp_page_shared()
3035 put_page(vmf->page); in wp_page_shared()
3038 tmp = finish_mkwrite_fault(vmf); in wp_page_shared()
3040 unlock_page(vmf->page); in wp_page_shared()
3041 put_page(vmf->page); in wp_page_shared()
3045 wp_page_reuse(vmf); in wp_page_shared()
3046 lock_page(vmf->page); in wp_page_shared()
3048 ret |= fault_dirty_shared_page(vmf); in wp_page_shared()
3049 put_page(vmf->page); in wp_page_shared()
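
The three helpers above handle write faults on shared mappings: wp_pfn_shared() for pure PFN maps via ->pfn_mkwrite(), wp_page_shared() for page-backed maps via do_page_mkwrite(). Both must drop the PTE lock before calling into the driver or filesystem, so finish_mkwrite_fault() retakes it and revalidates with pte_same() before wp_page_reuse() makes the PTE writable. The common shape, condensed from wp_page_shared():

	get_page(vmf->page);
	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
		vm_fault_t tmp;

		/* can't call into the fs with the ptl held */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		tmp = do_page_mkwrite(vmf);
		if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR |
					     VM_FAULT_NOPAGE)))) {
			put_page(vmf->page);
			return tmp;
		}
		/* relock, revalidate, then reuse the PTE */
		tmp = finish_mkwrite_fault(vmf);
		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
			unlock_page(vmf->page);
			put_page(vmf->page);
			return tmp;
		}
	} else {
		wp_page_reuse(vmf);
		lock_page(vmf->page);
	}
	ret |= fault_dirty_shared_page(vmf);	/* dirty + balance writeback */
	put_page(vmf->page);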
3072 static vm_fault_t do_wp_page(struct vm_fault *vmf) in do_wp_page() argument
3073 __releases(vmf->ptl) in do_wp_page()
3075 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
3077 if (userfaultfd_pte_wp(vma, *vmf->pte)) { in do_wp_page()
3078 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3079 return handle_userfault(vmf, VM_UFFD_WP); in do_wp_page()
3082 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3083 if (!vmf->page) { in do_wp_page()
3093 return wp_pfn_shared(vmf); in do_wp_page()
3095 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3096 return wp_page_copy(vmf); in do_wp_page()
3103 if (PageAnon(vmf->page)) { in do_wp_page()
3104 struct page *page = vmf->page; in do_wp_page()
3121 wp_page_reuse(vmf); in do_wp_page()
3125 return wp_page_shared(vmf); in do_wp_page()
3131 get_page(vmf->page); in do_wp_page()
3133 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3134 return wp_page_copy(vmf); in do_wp_page()
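
do_wp_page() is the dispatcher for all of the above; it is entered with vmf->ptl held and must release it on every path. Roughly: a userfaultfd-armed write-protect goes to userspace; PTEs with no struct page (vm_normal_page() returns NULL) go to wp_pfn_shared() when the VMA is shared-writable, else to wp_page_copy(); exclusively-owned anonymous pages are reused in place; shared file pages go through wp_page_shared(); everything else is copied. Skeleton, with the exclusive-ownership test condensed (the exact trylock/mapcount/refcount sequence varies between releases):

	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return handle_userfault(vmf, VM_UFFD_WP);
	}

	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
	if (!vmf->page) {
		/* PFN map: shared-writable may mkwrite, otherwise copy */
		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
		    (VM_WRITE|VM_SHARED))
			return wp_pfn_shared(vmf);
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return wp_page_copy(vmf);
	}

	if (PageAnon(vmf->page)) {
		struct page *page = vmf->page;

		/* condensed: reuse only if we hold the sole reference;
		 * KSM pages never qualify */
		if (!PageKsm(page) && page_count(page) == 1) {
			wp_page_reuse(vmf);
			return VM_FAULT_WRITE;
		}
	} else if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
		   (VM_WRITE|VM_SHARED)) {
		return wp_page_shared(vmf);
	}

	/* fall back to copy-on-write */
	get_page(vmf->page);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return wp_page_copy(vmf);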
3241 vm_fault_t do_swap_page(struct vm_fault *vmf) in do_swap_page() argument
3243 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
3252 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) in do_swap_page()
3255 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
3258 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3259 vmf->address); in do_swap_page()
3261 vmf->page = device_private_entry_to_page(entry); in do_swap_page()
3262 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); in do_swap_page()
3266 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
3274 page = lookup_swap_cache(entry, vma, vmf->address); in do_swap_page()
3284 vmf->address); in do_swap_page()
3311 vmf); in do_swap_page()
3320 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3321 vmf->address, &vmf->ptl); in do_swap_page()
3322 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
3342 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
3360 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3372 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3373 &vmf->ptl); in do_swap_page()
3374 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
3395 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { in do_swap_page()
3397 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
3402 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
3404 if (pte_swp_uffd_wp(vmf->orig_pte)) { in do_swap_page()
3408 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
3409 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
3410 vmf->orig_pte = pte; in do_swap_page()
3414 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_swap_page()
3417 do_page_add_anon_rmap(page, vma, vmf->address, exclusive); in do_swap_page()
3438 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
3439 ret |= do_wp_page(vmf); in do_swap_page()
3446 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
3448 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
3452 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
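
do_swap_page() is the longest chain of vmf uses here. The shape: decode vmf->orig_pte into a swp_entry_t (special-casing migration and device-private entries), find or read the page into the swap cache, lock it, retake the PTE lock, revalidate with pte_same(), then rebuild a present PTE, preserving soft-dirty and uffd-wp bits and upgrading to writable when reuse_swap_page() allows. Heavily condensed (readahead, charging, rmap and error paths trimmed; the out/out_nomap labels are the function's own):

	entry = pte_to_swp_entry(vmf->orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry))
			migration_entry_wait(vma->vm_mm, vmf->pmd,
					     vmf->address);
		else if (is_device_private_entry(entry)) {
			vmf->page = device_private_entry_to_page(entry);
			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
		}
		goto out;
	}

	page = lookup_swap_cache(entry, vma, vmf->address);
	/* ... swapin readahead if not cached, lock_page_or_retry() ... */

	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
				       &vmf->ptl);
	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
		goto out_nomap;		/* raced: someone else handled it */

	pte = mk_pte(page, vma->vm_page_prot);
	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		vmf->flags &= ~FAULT_FLAG_WRITE;
	}
	if (pte_swp_soft_dirty(vmf->orig_pte))
		pte = pte_mksoft_dirty(pte);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
	vmf->orig_pte = pte;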
3469 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) in do_anonymous_page() argument
3471 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
3490 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
3494 if (unlikely(pmd_trans_unstable(vmf->pmd))) in do_anonymous_page()
3498 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
3500 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
3502 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
3503 vmf->address, &vmf->ptl); in do_anonymous_page()
3504 if (!pte_none(*vmf->pte)) { in do_anonymous_page()
3505 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
3513 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3514 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3522 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
3542 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
3543 &vmf->ptl); in do_anonymous_page()
3544 if (!pte_none(*vmf->pte)) { in do_anonymous_page()
3545 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3555 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3557 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3561 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_anonymous_page()
3564 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
3567 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3569 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
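
do_anonymous_page() handles a first touch of anonymous memory. Read faults map the shared zero page read-only; write faults allocate a zeroed movable page. In both arms the PTE is remapped and checked with pte_none() before installing, and a still-empty PTE in a userfaultfd-MISSING region is handed to the monitor instead. Condensed write-fault arm (charging and the address-space stability check trimmed; the release label is the function's own):

	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	entry = pte_sw_mkyoung(mk_pte(page, vma->vm_page_prot));
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
				       &vmf->ptl);
	if (!pte_none(*vmf->pte)) {
		update_mmu_cache(vma, vmf->address, vmf->pte);
		goto release;		/* lost the race: drop the page */
	}
	if (userfaultfd_missing(vma)) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		put_page(page);
		return handle_userfault(vmf, VM_UFFD_MISSING);
	}
	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, vmf->address, false);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
	update_mmu_cache(vma, vmf->address, vmf->pte);
	pte_unmap_unlock(vmf->pte, vmf->ptl);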
3585 static vm_fault_t __do_fault(struct vm_fault *vmf) in __do_fault() argument
3587 struct vm_area_struct *vma = vmf->vma; in __do_fault()
3605 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
3606 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
3607 if (!vmf->prealloc_pte) in __do_fault()
3612 ret = vma->vm_ops->fault(vmf); in __do_fault()
3617 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
3619 unlock_page(vmf->page); in __do_fault()
3620 put_page(vmf->page); in __do_fault()
3621 vmf->page = NULL; in __do_fault()
3626 lock_page(vmf->page); in __do_fault()
3628 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); in __do_fault()
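
__do_fault() is the wrapper around the VMA's ->fault() handler. It preallocates a PTE table up front when the PMD is still empty, because ->fault() (e.g. filemap_fault()) may drop mmap_lock and cannot safely allocate one later; afterwards it sanity-checks the returned page. Sketch:

	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
		if (!vmf->prealloc_pte)
			return VM_FAULT_OOM;
	}

	ret = vma->vm_ops->fault(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
			    VM_FAULT_RETRY | VM_FAULT_DONE_COW)))
		return ret;

	if (unlikely(PageHWPoison(vmf->page))) {
		if (ret & VM_FAULT_LOCKED)
			unlock_page(vmf->page);
		put_page(vmf->page);
		vmf->page = NULL;
		return VM_FAULT_HWPOISON;
	}

	if (unlikely(!(ret & VM_FAULT_LOCKED)))
		lock_page(vmf->page);
	else
		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
	return ret;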
3644 static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf) in pte_alloc_one_map() argument
3646 struct vm_area_struct *vma = vmf->vma; in pte_alloc_one_map()
3648 if (!pmd_none(*vmf->pmd)) in pte_alloc_one_map()
3650 if (vmf->prealloc_pte) { in pte_alloc_one_map()
3651 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in pte_alloc_one_map()
3652 if (unlikely(!pmd_none(*vmf->pmd))) { in pte_alloc_one_map()
3653 spin_unlock(vmf->ptl); in pte_alloc_one_map()
3658 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in pte_alloc_one_map()
3659 spin_unlock(vmf->ptl); in pte_alloc_one_map()
3660 vmf->prealloc_pte = NULL; in pte_alloc_one_map()
3661 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { in pte_alloc_one_map()
3676 if (pmd_devmap_trans_unstable(vmf->pmd)) in pte_alloc_one_map()
3688 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in pte_alloc_one_map()
3689 &vmf->ptl); in pte_alloc_one_map()
3694 static void deposit_prealloc_pte(struct vm_fault *vmf) in deposit_prealloc_pte() argument
3696 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
3698 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
3704 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
3707 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
3709 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
3710 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
3711 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
3727 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
3728 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
3729 if (!vmf->prealloc_pte) in do_set_pmd()
3734 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
3735 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
3751 deposit_prealloc_pte(vmf); in do_set_pmd()
3753 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
3755 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
3761 spin_unlock(vmf->ptl); in do_set_pmd()
3765 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
3787 vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page) in alloc_set_pte() argument
3789 struct vm_area_struct *vma = vmf->vma; in alloc_set_pte()
3790 bool write = vmf->flags & FAULT_FLAG_WRITE; in alloc_set_pte()
3794 if (pmd_none(*vmf->pmd) && PageTransCompound(page)) { in alloc_set_pte()
3795 ret = do_set_pmd(vmf, page); in alloc_set_pte()
3800 if (!vmf->pte) { in alloc_set_pte()
3801 ret = pte_alloc_one_map(vmf); in alloc_set_pte()
3807 if (unlikely(!pte_none(*vmf->pte))) { in alloc_set_pte()
3808 update_mmu_tlb(vma, vmf->address, vmf->pte); in alloc_set_pte()
3820 page_add_new_anon_rmap(page, vma, vmf->address, false); in alloc_set_pte()
3826 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in alloc_set_pte()
3829 update_mmu_cache(vma, vmf->address, vmf->pte); in alloc_set_pte()
3850 vm_fault_t finish_fault(struct vm_fault *vmf) in finish_fault() argument
3856 if ((vmf->flags & FAULT_FLAG_WRITE) && in finish_fault()
3857 !(vmf->vma->vm_flags & VM_SHARED)) in finish_fault()
3858 page = vmf->cow_page; in finish_fault()
3860 page = vmf->page; in finish_fault()
3866 if (!(vmf->vma->vm_flags & VM_SHARED)) in finish_fault()
3867 ret = check_stable_address_space(vmf->vma->vm_mm); in finish_fault()
3869 ret = alloc_set_pte(vmf, page); in finish_fault()
3870 if (vmf->pte) in finish_fault()
3871 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
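
finish_fault() is the common tail for the fault handlers below: it picks vmf->cow_page for private writes (else vmf->page), checks the address space is still stable for private mappings, and lets alloc_set_pte() install the mapping, which first tries a PMD-sized mapping for compound pages, then maps the PTE via pte_alloc_one_map() and revalidates with pte_none(). Condensed core of alloc_set_pte(), with counters and LRU handling trimmed:

	if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
		ret = do_set_pmd(vmf, page);	/* try a huge mapping first */
		if (ret != VM_FAULT_FALLBACK)
			return ret;
	}
	if (!vmf->pte) {
		ret = pte_alloc_one_map(vmf);	/* maps vmf->pte, takes ptl */
		if (ret)
			return ret;
	}
	if (unlikely(!pte_none(*vmf->pte))) {
		update_mmu_tlb(vma, vmf->address, vmf->pte);
		return VM_FAULT_NOPAGE;		/* raced with another faulter */
	}

	entry = pte_sw_mkyoung(mk_pte(page, vma->vm_page_prot));
	if (write)
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	if (write && !(vma->vm_flags & VM_SHARED))
		page_add_new_anon_rmap(page, vma, vmf->address, false);
	else
		page_add_file_rmap(page, false);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
	update_mmu_cache(vma, vmf->address, vmf->pte);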
3935 static vm_fault_t do_fault_around(struct vm_fault *vmf) in do_fault_around() argument
3937 unsigned long address = vmf->address, nr_pages, mask; in do_fault_around()
3938 pgoff_t start_pgoff = vmf->pgoff; in do_fault_around()
3946 vmf->address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
3947 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
3955 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + in do_fault_around()
3957 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, in do_fault_around()
3960 if (pmd_none(*vmf->pmd)) { in do_fault_around()
3961 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
3962 if (!vmf->prealloc_pte) in do_fault_around()
3967 vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); in do_fault_around()
3970 if (pmd_trans_huge(*vmf->pmd)) { in do_fault_around()
3976 if (!vmf->pte) in do_fault_around()
3980 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); in do_fault_around()
3981 if (!pte_none(*vmf->pte)) in do_fault_around()
3983 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault_around()
3985 vmf->address = address; in do_fault_around()
3986 vmf->pte = NULL; in do_fault_around()
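
do_fault_around() temporarily rewrites vmf->address to the start of an aligned window (clamped to the VMA and to the fault_around_bytes knob) and asks the filesystem to map whatever pages it already has via ->map_pages(), preallocating a PTE table first if the PMD is empty; the original address is restored afterwards and the caller learns whether the faulting PTE itself got populated. The calls at the center (window arithmetic elided):

	vmf->address = max(address & mask, vmf->vma->vm_start);
	/* ... compute start_pgoff/end_pgoff for the clamped window ... */
	if (pmd_none(*vmf->pmd)) {
		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
		if (!vmf->prealloc_pte)
			goto out;
	}
	vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
	/* ... check whether the originally faulting PTE is now set ... */
	vmf->address = address;		/* restore for the caller */
	vmf->pte = NULL;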
3990 static vm_fault_t do_read_fault(struct vm_fault *vmf) in do_read_fault() argument
3992 struct vm_area_struct *vma = vmf->vma; in do_read_fault()
4001 ret = do_fault_around(vmf); in do_read_fault()
4006 ret = __do_fault(vmf); in do_read_fault()
4010 ret |= finish_fault(vmf); in do_read_fault()
4011 unlock_page(vmf->page); in do_read_fault()
4013 put_page(vmf->page); in do_read_fault()
4017 static vm_fault_t do_cow_fault(struct vm_fault *vmf) in do_cow_fault() argument
4019 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
4025 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
4026 if (!vmf->cow_page) in do_cow_fault()
4029 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) { in do_cow_fault()
4030 put_page(vmf->cow_page); in do_cow_fault()
4033 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL); in do_cow_fault()
4035 ret = __do_fault(vmf); in do_cow_fault()
4041 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
4042 __SetPageUptodate(vmf->cow_page); in do_cow_fault()
4044 ret |= finish_fault(vmf); in do_cow_fault()
4045 unlock_page(vmf->page); in do_cow_fault()
4046 put_page(vmf->page); in do_cow_fault()
4051 put_page(vmf->cow_page); in do_cow_fault()
4055 static vm_fault_t do_shared_fault(struct vm_fault *vmf) in do_shared_fault() argument
4057 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
4060 ret = __do_fault(vmf); in do_shared_fault()
4069 unlock_page(vmf->page); in do_shared_fault()
4070 tmp = do_page_mkwrite(vmf); in do_shared_fault()
4073 put_page(vmf->page); in do_shared_fault()
4078 ret |= finish_fault(vmf); in do_shared_fault()
4081 unlock_page(vmf->page); in do_shared_fault()
4082 put_page(vmf->page); in do_shared_fault()
4086 ret |= fault_dirty_shared_page(vmf); in do_shared_fault()
4098 static vm_fault_t do_fault(struct vm_fault *vmf) in do_fault() argument
4100 struct vm_area_struct *vma = vmf->vma; in do_fault()
4112 if (unlikely(!pmd_present(*vmf->pmd))) in do_fault()
4115 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, in do_fault()
4116 vmf->pmd, in do_fault()
4117 vmf->address, in do_fault()
4118 &vmf->ptl); in do_fault()
4126 if (unlikely(pte_none(*vmf->pte))) in do_fault()
4131 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault()
4133 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
4134 ret = do_read_fault(vmf); in do_fault()
4136 ret = do_cow_fault(vmf); in do_fault()
4138 ret = do_shared_fault(vmf); in do_fault()
4141 if (vmf->prealloc_pte) { in do_fault()
4142 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
4143 vmf->prealloc_pte = NULL; in do_fault()
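
do_fault() routes a file-backed fault: reads go to do_read_fault() (which tries do_fault_around() first), private writes to do_cow_fault() (allocate and charge vmf->cow_page, copy from vmf->page), shared writes to do_shared_fault() (fault, do_page_mkwrite(), dirty). VMAs with no ->fault() op get SIGBUS unless another thread already installed the PTE. Condensed:

	if (!vma->vm_ops->fault) {
		/* VMA not fully populated on mmap(): succeed only if
		 * some other thread already installed a PTE */
		if (unlikely(!pmd_present(*vmf->pmd)))
			ret = VM_FAULT_SIGBUS;
		else {
			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
						       vmf->pmd, vmf->address,
						       &vmf->ptl);
			ret = unlikely(pte_none(*vmf->pte)) ?
				VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			pte_unmap_unlock(vmf->pte, vmf->ptl);
		}
	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
		ret = do_read_fault(vmf);
	else if (!(vma->vm_flags & VM_SHARED))
		ret = do_cow_fault(vmf);
	else
		ret = do_shared_fault(vmf);

	/* a preallocated page table that went unused is freed here */
	if (vmf->prealloc_pte) {
		pte_free(vm_mm, vmf->prealloc_pte);
		vmf->prealloc_pte = NULL;
	}
	return ret;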
4163 static vm_fault_t do_numa_page(struct vm_fault *vmf) in do_numa_page() argument
4165 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
4172 bool was_writable = pte_savedwrite(vmf->orig_pte); in do_numa_page()
4180 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
4181 spin_lock(vmf->ptl); in do_numa_page()
4182 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { in do_numa_page()
4183 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4191 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
4196 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
4197 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
4199 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
4201 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4207 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4231 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
4233 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
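
do_numa_page() services NUMA-hinting faults (PROT_NONE-style PTEs planted by the balancing scanner). It revalidates vmf->orig_pte under the lock, rewrites the PTE as present via the ptep_modify_prot_start()/commit() pair, then decides whether to migrate the page toward the faulting node. Condensed, with the accounting and the !page bail-outs trimmed:

	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
	spin_lock(vmf->ptl);
	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}

	/* make the PTE accessible again before any possible migration */
	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
	pte = pte_modify(old_pte, vma->vm_page_prot);
	pte = pte_mkyoung(pte);
	if (was_writable)
		pte = pte_mkwrite(pte);
	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
	update_mmu_cache(vma, vmf->address, vmf->pte);

	page = vm_normal_page(vma, vmf->address, pte);
	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
				       &flags);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	if (target_nid != NUMA_NO_NODE &&
	    migrate_misplaced_page(page, vma, target_nid))
		page_nid = target_nid;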
4253 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) in create_huge_pmd() argument
4255 if (vma_is_anonymous(vmf->vma)) in create_huge_pmd()
4256 return do_huge_pmd_anonymous_page(vmf); in create_huge_pmd()
4257 if (vmf->vma->vm_ops->huge_fault) in create_huge_pmd()
4258 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in create_huge_pmd()
4263 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) in wp_huge_pmd() argument
4265 if (vma_is_anonymous(vmf->vma)) { in wp_huge_pmd()
4266 if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd)) in wp_huge_pmd()
4267 return handle_userfault(vmf, VM_UFFD_WP); in wp_huge_pmd()
4268 return do_huge_pmd_wp_page(vmf, orig_pmd); in wp_huge_pmd()
4270 if (vmf->vma->vm_ops->huge_fault) { in wp_huge_pmd()
4271 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in wp_huge_pmd()
4278 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
4283 static vm_fault_t create_huge_pud(struct vm_fault *vmf) in create_huge_pud() argument
4288 if (vma_is_anonymous(vmf->vma)) in create_huge_pud()
4290 if (vmf->vma->vm_ops->huge_fault) { in create_huge_pud()
4291 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in create_huge_pud()
4298 __split_huge_pud(vmf->vma, vmf->pud, vmf->address); in create_huge_pud()
4303 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) in wp_huge_pud() argument
4307 if (vma_is_anonymous(vmf->vma)) in wp_huge_pud()
4309 if (vmf->vma->vm_ops->huge_fault) in wp_huge_pud()
4310 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in wp_huge_pud()
4330 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) in handle_pte_fault() argument
4334 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
4341 vmf->pte = NULL; in handle_pte_fault()
4344 if (pmd_devmap_trans_unstable(vmf->pmd)) in handle_pte_fault()
4352 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in handle_pte_fault()
4353 vmf->orig_pte = *vmf->pte; in handle_pte_fault()
4364 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
4365 pte_unmap(vmf->pte); in handle_pte_fault()
4366 vmf->pte = NULL; in handle_pte_fault()
4370 if (!vmf->pte) { in handle_pte_fault()
4371 if (vma_is_anonymous(vmf->vma)) in handle_pte_fault()
4372 return do_anonymous_page(vmf); in handle_pte_fault()
4374 return do_fault(vmf); in handle_pte_fault()
4377 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
4378 return do_swap_page(vmf); in handle_pte_fault()
4380 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
4381 return do_numa_page(vmf); in handle_pte_fault()
4383 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
4384 spin_lock(vmf->ptl); in handle_pte_fault()
4385 entry = vmf->orig_pte; in handle_pte_fault()
4386 if (unlikely(!pte_same(*vmf->pte, entry))) { in handle_pte_fault()
4387 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4390 if (vmf->flags & FAULT_FLAG_WRITE) { in handle_pte_fault()
4392 return do_wp_page(vmf); in handle_pte_fault()
4396 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
4397 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
4398 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4401 if (vmf->flags & FAULT_FLAG_TRIED) in handle_pte_fault()
4409 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
4410 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
4413 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
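
handle_pte_fault() is the per-PTE dispatcher tying everything above together. Note that vmf->orig_pte is read without the PTL, which is why every leaf handler revalidates with pte_same() after locking. The decision tree, condensed (the spurious-fault TLB flush and FAULT_FLAG_TRIED handling trimmed):

	if (!vmf->pte) {			/* no table, or PTE was none */
		if (vma_is_anonymous(vmf->vma))
			return do_anonymous_page(vmf);
		else
			return do_fault(vmf);
	}
	if (!pte_present(vmf->orig_pte))	/* swap/migration/device entry */
		return do_swap_page(vmf);
	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
		return do_numa_page(vmf);	/* NUMA hinting fault */

	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
	spin_lock(vmf->ptl);
	entry = vmf->orig_pte;
	if (unlikely(!pte_same(*vmf->pte, entry))) {
		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
		goto unlock;
	}
	if (vmf->flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry))
			return do_wp_page(vmf);	/* releases the ptl */
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
				  vmf->flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
unlock:
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return 0;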
4426 struct vm_fault vmf = { in __handle_mm_fault() local
4444 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
4445 if (!vmf.pud) in __handle_mm_fault()
4448 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4449 ret = create_huge_pud(&vmf); in __handle_mm_fault()
4453 pud_t orig_pud = *vmf.pud; in __handle_mm_fault()
4461 ret = wp_huge_pud(&vmf, orig_pud); in __handle_mm_fault()
4465 huge_pud_set_accessed(&vmf, orig_pud); in __handle_mm_fault()
4471 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
4472 if (!vmf.pmd) in __handle_mm_fault()
4476 if (pud_trans_unstable(vmf.pud)) in __handle_mm_fault()
4479 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4480 ret = create_huge_pmd(&vmf); in __handle_mm_fault()
4484 pmd_t orig_pmd = *vmf.pmd; in __handle_mm_fault()
4491 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
4496 return do_huge_pmd_numa_page(&vmf, orig_pmd); in __handle_mm_fault()
4499 ret = wp_huge_pmd(&vmf, orig_pmd); in __handle_mm_fault()
4503 huge_pmd_set_accessed(&vmf, orig_pmd); in __handle_mm_fault()
4509 return handle_pte_fault(&vmf); in __handle_mm_fault()
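
__handle_mm_fault() builds the vm_fault descriptor on the stack and walks pgd -> p4d -> pud -> pmd, giving transparent huge pages first shot at each level (create, write-protect, NUMA, set-accessed) before falling through to handle_pte_fault(). Condensed initializer and PMD leg (the PUD leg is symmetric):

	struct vm_fault vmf = {
		.vma = vma,
		.address = address & PAGE_MASK,
		.flags = flags,
		.pgoff = linear_page_index(vma, address),
		.gfp_mask = __get_fault_gfp_mask(vma),
	};

	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
	if (!vmf.pmd)
		return VM_FAULT_OOM;
	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
		ret = create_huge_pmd(&vmf);
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	} else {
		pmd_t orig_pmd = *vmf.pmd;

		if (unlikely(is_swap_pmd(orig_pmd))) {
			pmd_migration_entry_wait(mm, vmf.pmd);
			return 0;
		}
		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
				return do_huge_pmd_numa_page(&vmf, orig_pmd);
			if ((flags & FAULT_FLAG_WRITE) &&
			    !pmd_write(orig_pmd)) {
				ret = wp_huge_pmd(&vmf, orig_pmd);
				if (!(ret & VM_FAULT_FALLBACK))
					return ret;
			} else {
				huge_pmd_set_accessed(&vmf, orig_pmd);
				return 0;
			}
		}
	}
	return handle_pte_fault(&vmf);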