Lines matching refs: dst_vma (in mm/userfaultfd.c)
31 struct vm_area_struct *dst_vma; in find_dst_vma() local
33 dst_vma = find_vma(dst_mm, dst_start); in find_dst_vma()
34 if (!dst_vma) in find_dst_vma()
37 if (dst_start < dst_vma->vm_start || in find_dst_vma()
38 dst_start + len > dst_vma->vm_end) in find_dst_vma()
46 if (!dst_vma->vm_userfaultfd_ctx.ctx) in find_dst_vma()
49 return dst_vma; in find_dst_vma()
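
Read together, the find_dst_vma() lines above describe one validation helper: look up the vma covering dst_start, require the whole [dst_start, dst_start + len) range to sit inside that single vma, and require the vma to be registered with a userfaultfd context. A minimal sketch reconstructed from those lines (not verbatim kernel source; the comments are mine):

static struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
                                           unsigned long dst_start,
                                           unsigned long len)
{
        struct vm_area_struct *dst_vma;

        dst_vma = find_vma(dst_mm, dst_start);
        if (!dst_vma)
                return NULL;

        /* The destination range must fit entirely inside this one vma. */
        if (dst_start < dst_vma->vm_start ||
            dst_start + len > dst_vma->vm_end)
                return NULL;

        /* Only vmas registered with a userfaultfd context qualify. */
        if (!dst_vma->vm_userfaultfd_ctx.ctx)
                return NULL;

        return dst_vma;
}
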
59 struct vm_area_struct *dst_vma, in mfill_atomic_install_pte() argument
65 bool writable = dst_vma->vm_flags & VM_WRITE; in mfill_atomic_install_pte()
66 bool vm_shared = dst_vma->vm_flags & VM_SHARED; in mfill_atomic_install_pte()
72 _dst_pte = mk_pte(page, dst_vma->vm_page_prot); in mfill_atomic_install_pte()
97 if (vma_is_shmem(dst_vma)) { in mfill_atomic_install_pte()
99 inode = dst_vma->vm_file->f_inode; in mfill_atomic_install_pte()
100 offset = linear_page_index(dst_vma, dst_addr); in mfill_atomic_install_pte()
120 page_add_file_rmap(page, dst_vma, false); in mfill_atomic_install_pte()
122 page_add_new_anon_rmap(page, dst_vma, dst_addr); in mfill_atomic_install_pte()
123 lru_cache_add_inactive_or_unevictable(page, dst_vma); in mfill_atomic_install_pte()
135 update_mmu_cache(dst_vma, dst_addr, dst_pte); in mfill_atomic_install_pte()
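
The mfill_atomic_install_pte() lines show the PTE-installation step shared by all fill modes: build the pte from the vma's protection, recheck shmem i_size under the page-table lock, take the file rmap path for cache pages and the anon rmap path for newly allocated ones, then flush the MMU cache. A condensed sketch built around the listed lines; the surrounding calls (pte_mkuffd_wp, lru_cache_add, inc_mm_counter, ...) follow the v6.x kernel but are filled in from memory and may not match the exact tree this listing came from:

static int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                                    struct vm_area_struct *dst_vma,
                                    unsigned long dst_addr, struct page *page,
                                    bool newly_allocated, bool wp_copy)
{
        bool writable = dst_vma->vm_flags & VM_WRITE;
        bool vm_shared = dst_vma->vm_flags & VM_SHARED;
        bool page_in_cache = page_mapping(page);
        pte_t _dst_pte, *dst_pte;
        spinlock_t *ptl;
        int ret;

        _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
        _dst_pte = pte_mkdirty(_dst_pte);
        if (page_in_cache && !vm_shared)
                writable = false;       /* private mapping of a cache page */
        if (wp_copy) {
                _dst_pte = pte_mkuffd_wp(_dst_pte);
                writable = false;
        }
        if (writable)
                _dst_pte = pte_mkwrite(_dst_pte);

        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

        if (vma_is_shmem(dst_vma)) {
                /* Serialize against truncate: the offset must stay below i_size. */
                struct inode *inode = dst_vma->vm_file->f_inode;
                pgoff_t offset = linear_page_index(dst_vma, dst_addr);
                pgoff_t max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

                ret = -EFAULT;
                if (unlikely(offset >= max_off))
                        goto out_unlock;
        }

        ret = -EEXIST;
        if (!pte_none(*dst_pte))
                goto out_unlock;

        if (page_in_cache) {
                if (newly_allocated)
                        lru_cache_add(page);
                page_add_file_rmap(page, dst_vma, false);
        } else {
                page_add_new_anon_rmap(page, dst_vma, dst_addr);
                lru_cache_add_inactive_or_unevictable(page, dst_vma);
        }

        inc_mm_counter(dst_mm, mm_counter(page));
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

        /* No TLB invalidation needed: the pte was non-present before. */
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
        ret = 0;
out_unlock:
        pte_unmap_unlock(dst_pte, ptl);
        return ret;
}
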
144 struct vm_area_struct *dst_vma, in mcopy_atomic_pte() argument
156 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr); in mcopy_atomic_pte()
208 ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr, in mcopy_atomic_pte()
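
mcopy_atomic_pte() (UFFDIO_COPY on anonymous memory) allocates a fresh page in the destination vma, copies the source buffer into it, and hands it to mfill_atomic_install_pte(). A sketch; in the real function the copy runs with page faults disabled, and a failed copy returns -ENOENT with the page parked in *pagep so the caller can retry after faulting the source in outside mmap_lock:

static int mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                            struct vm_area_struct *dst_vma,
                            unsigned long dst_addr, unsigned long src_addr,
                            struct page **pagep, bool wp_copy)
{
        struct page *page;
        void *page_kaddr;
        int ret;

        page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
        if (!page)
                return -ENOMEM;

        page_kaddr = kmap_local_page(page);
        ret = copy_from_user(page_kaddr, (const void __user *)src_addr,
                             PAGE_SIZE);
        kunmap_local(page_kaddr);
        if (unlikely(ret)) {
                /* Simplified: the kernel stashes the page in *pagep here. */
                put_page(page);
                return -ENOENT;
        }
        flush_dcache_page(page);
        __SetPageUptodate(page);

        ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
                                       page, true, wp_copy);
        if (ret)
                put_page(page);
        return ret;
}
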
221 struct vm_area_struct *dst_vma, in mfill_zeropage_pte() argument
231 dst_vma->vm_page_prot)); in mfill_zeropage_pte()
233 if (dst_vma->vm_file) { in mfill_zeropage_pte()
235 inode = dst_vma->vm_file->f_inode; in mfill_zeropage_pte()
236 offset = linear_page_index(dst_vma, dst_addr); in mfill_zeropage_pte()
247 update_mmu_cache(dst_vma, dst_addr, dst_pte); in mfill_zeropage_pte()
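
mfill_zeropage_pte() (UFFDIO_ZEROPAGE on anonymous memory) avoids allocation entirely by mapping the shared zero page; the vm_file branch in the listing is the same i_size recheck for file-backed vmas. A sketch along the same lines:

static int mfill_zeropage_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                              struct vm_area_struct *dst_vma,
                              unsigned long dst_addr)
{
        pte_t _dst_pte, *dst_pte;
        spinlock_t *ptl;
        int ret;

        /* Map the global zero page read-only; no memory is allocated. */
        _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
                                         dst_vma->vm_page_prot));
        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
        if (dst_vma->vm_file) {
                /* File-backed: the offset must still be below i_size. */
                struct inode *inode = dst_vma->vm_file->f_inode;
                pgoff_t offset = linear_page_index(dst_vma, dst_addr);
                pgoff_t max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

                ret = -EFAULT;
                if (unlikely(offset >= max_off))
                        goto out_unlock;
        }
        ret = -EEXIST;
        if (!pte_none(*dst_pte))
                goto out_unlock;
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
        ret = 0;
out_unlock:
        pte_unmap_unlock(dst_pte, ptl);
        return ret;
}
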
257 struct vm_area_struct *dst_vma, in mcontinue_atomic_pte() argument
261 struct inode *inode = file_inode(dst_vma->vm_file); in mcontinue_atomic_pte()
262 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); in mcontinue_atomic_pte()
284 ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr, in mcontinue_atomic_pte()
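
mcontinue_atomic_pte() (UFFDIO_CONTINUE) copies nothing: it looks up the page that already sits in the shmem page cache at the faulting offset and installs it. A sketch assuming the v6.1-era shmem_get_folio() API (older trees use shmem_getpage() instead; the HWPoison check is elided):

static int mcontinue_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr, bool wp_copy)
{
        struct inode *inode = file_inode(dst_vma->vm_file);
        pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
        struct folio *folio;
        struct page *page;
        int ret;

        /* SGP_NOALLOC: fail rather than allocate if the page is absent. */
        ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
        if (ret)
                return ret;
        if (!folio)
                return -EFAULT;

        page = folio_file_page(folio, pgoff);
        ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
                                       page, false, wp_copy);
        if (ret) {
                /* Drop the cache reference taken by the lookup. */
                folio_unlock(folio);
                folio_put(folio);
                return ret;
        }

        /* On success the new pte consumes the reference. */
        folio_unlock(folio);
        return 0;
}
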
326 struct vm_area_struct *dst_vma, in __mcopy_atomic_hugetlb() argument
333 int vm_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
359 vma_hpagesize = vma_kernel_pagesize(dst_vma); in __mcopy_atomic_hugetlb()
373 if (!dst_vma) { in __mcopy_atomic_hugetlb()
375 dst_vma = find_dst_vma(dst_mm, dst_start, len); in __mcopy_atomic_hugetlb()
376 if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) in __mcopy_atomic_hugetlb()
380 if (vma_hpagesize != vma_kernel_pagesize(dst_vma)) in __mcopy_atomic_hugetlb()
383 vm_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
391 if (unlikely(anon_vma_prepare(dst_vma))) in __mcopy_atomic_hugetlb()
404 idx = linear_page_index(dst_vma, dst_addr); in __mcopy_atomic_hugetlb()
405 mapping = dst_vma->vm_file->f_mapping; in __mcopy_atomic_hugetlb()
408 hugetlb_vma_lock_read(dst_vma); in __mcopy_atomic_hugetlb()
411 dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize); in __mcopy_atomic_hugetlb()
413 hugetlb_vma_unlock_read(dst_vma); in __mcopy_atomic_hugetlb()
421 hugetlb_vma_unlock_read(dst_vma); in __mcopy_atomic_hugetlb()
426 err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, in __mcopy_atomic_hugetlb()
430 hugetlb_vma_unlock_read(dst_vma); in __mcopy_atomic_hugetlb()
449 dst_vma = NULL; in __mcopy_atomic_hugetlb()
479 struct vm_area_struct *dst_vma,
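
The __mcopy_atomic_hugetlb() references are dominated by retry handling: the function can drop mmap_lock to fault in source pages, after which dst_vma is set to NULL (line 449) and looked up again, with every check redone, on the next pass. The declaration-only line at 479 is the stub prototype used when CONFIG_HUGETLB_PAGE is off. A heavily condensed sketch of the loop (the huge_pte_none recheck, the out-of-lock source copy, and error cleanup are elided or simplified, and details vary by kernel version):

static ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                                      struct vm_area_struct *dst_vma,
                                      unsigned long dst_start,
                                      unsigned long src_start,
                                      unsigned long len,
                                      enum mcopy_atomic_mode mode,
                                      bool wp_copy)
{
        int vm_shared = dst_vma->vm_flags & VM_SHARED;
        unsigned long vma_hpagesize = vma_kernel_pagesize(dst_vma);
        unsigned long src_addr = src_start, dst_addr = dst_start;
        struct address_space *mapping;
        struct page *page = NULL;
        long copied = 0;
        pte_t *dst_pte;
        ssize_t err;
        pgoff_t idx;
        u32 hash;

retry:
        if (!dst_vma) {
                /* mmap_lock was dropped: re-find and re-validate the vma. */
                err = -ENOENT;
                dst_vma = find_dst_vma(dst_mm, dst_start, len);
                if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
                        goto out_unlock;

                err = -EINVAL;
                if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
                        goto out_unlock;

                vm_shared = dst_vma->vm_flags & VM_SHARED;
        }

        /* Private mappings need an anon_vma for the new pages. */
        err = -ENOMEM;
        if (!vm_shared && unlikely(anon_vma_prepare(dst_vma)))
                goto out_unlock;

        while (src_addr < src_start + len) {
                /* Serialize against hugetlb faults on the same index. */
                idx = linear_page_index(dst_vma, dst_addr);
                mapping = dst_vma->vm_file->f_mapping;
                hash = hugetlb_fault_mutex_hash(mapping, idx);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);
                hugetlb_vma_lock_read(dst_vma);

                err = -ENOMEM;
                dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr,
                                         vma_hpagesize);
                if (!dst_pte) {
                        hugetlb_vma_unlock_read(dst_vma);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out_unlock;
                }

                err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
                                               dst_addr, src_addr, mode,
                                               &page, wp_copy);
                hugetlb_vma_unlock_read(dst_vma);
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);

                if (unlikely(err == -ENOENT)) {
                        /* Copy the source outside mmap_lock, then retry. */
                        mmap_read_unlock(dst_mm);
                        /* ... copy_huge_page_from_user() elided ... */
                        mmap_read_lock(dst_mm);
                        dst_vma = NULL; /* force the re-lookup above */
                        goto retry;
                }
                if (err)
                        break;

                dst_addr += vma_hpagesize;
                src_addr += vma_hpagesize;
                copied += vma_hpagesize;
        }

out_unlock:
        mmap_read_unlock(dst_mm);
        return copied ? copied : err;
}
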
489 struct vm_area_struct *dst_vma, in mfill_atomic_pte() argument
499 return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, in mfill_atomic_pte()
513 if (!(dst_vma->vm_flags & VM_SHARED)) { in mfill_atomic_pte()
515 err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, in mfill_atomic_pte()
520 dst_vma, dst_addr); in mfill_atomic_pte()
522 err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, in mfill_atomic_pte()
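
mfill_atomic_pte() is the per-pte dispatcher for non-hugetlb vmas: CONTINUE mode goes straight to mcontinue_atomic_pte(); private (!VM_SHARED) destinations get plain anonymous memory via mcopy_atomic_pte() or mfill_zeropage_pte() even when shmem-backed, because that is what the CoW fault path would produce anyway; only shared destinations go through shmem_mfill_atomic_pte(). A sketch (the shmem helper's argument order is approximated):

static ssize_t mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr, unsigned long src_addr,
                                struct page **page,
                                enum mcopy_atomic_mode mode, bool wp_copy)
{
        ssize_t err;

        if (mode == MCOPY_ATOMIC_CONTINUE)
                return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                            dst_addr, wp_copy);

        /*
         * A hole in a MAP_PRIVATE shmem mapping is filled with plain
         * anonymous memory, exactly as the CoW fault path would do.
         */
        if (!(dst_vma->vm_flags & VM_SHARED)) {
                if (mode == MCOPY_ATOMIC_NORMAL)
                        err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                               dst_addr, src_addr, page,
                                               wp_copy);
                else
                        err = mfill_zeropage_pte(dst_mm, dst_pmd,
                                                 dst_vma, dst_addr);
        } else {
                err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                             dst_addr, src_addr,
                                             mode != MCOPY_ATOMIC_NORMAL,
                                             wp_copy, page);
        }

        return err;
}
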
539 struct vm_area_struct *dst_vma; in __mcopy_atomic() local
578 dst_vma = find_dst_vma(dst_mm, dst_start, len); in __mcopy_atomic()
579 if (!dst_vma) in __mcopy_atomic()
587 if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && in __mcopy_atomic()
588 dst_vma->vm_flags & VM_SHARED)) in __mcopy_atomic()
596 if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP)) in __mcopy_atomic()
602 if (is_vm_hugetlb_page(dst_vma)) in __mcopy_atomic()
603 return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, in __mcopy_atomic()
607 if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) in __mcopy_atomic()
609 if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE) in __mcopy_atomic()
618 if (!(dst_vma->vm_flags & VM_SHARED) && in __mcopy_atomic()
619 unlikely(anon_vma_prepare(dst_vma))) in __mcopy_atomic()
656 err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, in __mcopy_atomic()
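
__mcopy_atomic() ties the pieces together: validate the destination vma once under mmap_lock, dispatch hugetlb vmas wholesale, then walk the range page by page calling mfill_atomic_pte(). A condensed sketch of the validation section and loop (pmd_none()/THP handling and the -ENOENT source-fault retry are elided; mm_alloc_pmd() is the file-local pmd helper):

static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
                                              unsigned long dst_start,
                                              unsigned long src_start,
                                              unsigned long len,
                                              enum mcopy_atomic_mode mcopy_mode,
                                              atomic_t *mmap_changing,
                                              __u64 mode)
{
        struct vm_area_struct *dst_vma;
        unsigned long src_addr = src_start, dst_addr = dst_start;
        bool wp_copy = mode & UFFDIO_COPY_MODE_WP;
        struct page *page = NULL;
        long copied = 0;
        pmd_t *dst_pmd;
        ssize_t err;

        mmap_read_lock(dst_mm);

        /* Bail out if another thread is changing the address space. */
        err = -EAGAIN;
        if (mmap_changing && atomic_read(mmap_changing))
                goto out_unlock;

        err = -ENOENT;
        dst_vma = find_dst_vma(dst_mm, dst_start, len);
        if (!dst_vma)
                goto out_unlock;

        /* shmem_zero_setup() vmas are shared yet anonymous: unsupported. */
        err = -EINVAL;
        if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
                         dst_vma->vm_flags & VM_SHARED))
                goto out_unlock;

        /* Write-protect mode needs a uffd-wp registered vma. */
        if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
                goto out_unlock;

        /* hugetlb takes its own path (and drops mmap_lock itself). */
        if (is_vm_hugetlb_page(dst_vma))
                return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
                                              src_start, len, mcopy_mode,
                                              wp_copy);

        if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
                goto out_unlock;
        if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
                goto out_unlock;

        err = -ENOMEM;
        if (!(dst_vma->vm_flags & VM_SHARED) &&
            unlikely(anon_vma_prepare(dst_vma)))
                goto out_unlock;

        while (src_addr < src_start + len) {
                dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
                if (unlikely(!dst_pmd)) {
                        err = -ENOMEM;
                        break;
                }
                err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
                                       src_addr, &page, mcopy_mode, wp_copy);
                if (err)
                        break;
                dst_addr += PAGE_SIZE;
                src_addr += PAGE_SIZE;
                copied += PAGE_SIZE;
        }

out_unlock:
        mmap_read_unlock(dst_mm);
        return copied ? copied : err;
}
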
725 void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma, in uffd_wp_range() argument
732 newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE)); in uffd_wp_range()
734 newprot = vm_get_page_prot(dst_vma->vm_flags); in uffd_wp_range()
737 change_protection(&tlb, dst_vma, start, start + len, newprot, in uffd_wp_range()
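
uffd_wp_range() implements both directions of UFFDIO_WRITEPROTECT with a single change_protection() call: VM_WRITE is dropped from the page protection when write-protecting and the vma's normal protection is restored when resolving, with MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE steering the uffd-wp pte bit. A sketch matching the lines above:

void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
                   unsigned long start, unsigned long len, bool enable_wp)
{
        struct mmu_gather tlb;
        pgprot_t newprot;

        if (enable_wp)
                newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
        else
                newprot = vm_get_page_prot(dst_vma->vm_flags);

        tlb_gather_mmu(&tlb, dst_mm);
        change_protection(&tlb, dst_vma, start, start + len, newprot,
                          enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
        tlb_finish_mmu(&tlb);
}
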
746 struct vm_area_struct *dst_vma; in mwriteprotect_range() local
771 dst_vma = find_dst_vma(dst_mm, start, len); in mwriteprotect_range()
773 if (!dst_vma) in mwriteprotect_range()
775 if (!userfaultfd_wp(dst_vma)) in mwriteprotect_range()
777 if (!vma_can_userfault(dst_vma, dst_vma->vm_flags)) in mwriteprotect_range()
780 if (is_vm_hugetlb_page(dst_vma)) { in mwriteprotect_range()
782 page_mask = vma_kernel_pagesize(dst_vma) - 1; in mwriteprotect_range()
787 uffd_wp_range(dst_mm, dst_vma, start, len, enable_wp); in mwriteprotect_range()
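
mwriteprotect_range() performs the same vma validation dance as the copy path before delegating to uffd_wp_range(): the vma must be found by find_dst_vma(), be uffd-wp registered, and pass vma_can_userfault(); hugetlb additionally requires huge-page-aligned start and len. A sketch (the initial start/len sanity checks are condensed):

int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                        unsigned long len, bool enable_wp,
                        atomic_t *mmap_changing)
{
        struct vm_area_struct *dst_vma;
        unsigned long page_mask;
        int err;

        mmap_read_lock(dst_mm);

        /* Bail out if another thread is changing the address space. */
        err = -EAGAIN;
        if (mmap_changing && atomic_read(mmap_changing))
                goto out_unlock;

        err = -ENOENT;
        dst_vma = find_dst_vma(dst_mm, start, len);
        if (!dst_vma)
                goto out_unlock;
        if (!userfaultfd_wp(dst_vma))
                goto out_unlock;
        if (!vma_can_userfault(dst_vma, dst_vma->vm_flags))
                goto out_unlock;

        if (is_vm_hugetlb_page(dst_vma)) {
                /* hugetlb demands huge-page-aligned start and len. */
                err = -EINVAL;
                page_mask = vma_kernel_pagesize(dst_vma) - 1;
                if ((start & page_mask) || (len & page_mask))
                        goto out_unlock;
        }

        uffd_wp_range(dst_mm, dst_vma, start, len, enable_wp);

        err = 0;
out_unlock:
        mmap_read_unlock(dst_mm);
        return err;
}
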