Lines Matching refs:dst_vma (all matches are in mm/userfaultfd.c)

23 struct vm_area_struct *dst_vma, in mcopy_atomic_pte() argument
39 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr); in mcopy_atomic_pte()
72 _dst_pte = mk_pte(page, dst_vma->vm_page_prot); in mcopy_atomic_pte()
73 if (dst_vma->vm_flags & VM_WRITE) in mcopy_atomic_pte()
77 if (dst_vma->vm_file) { in mcopy_atomic_pte()
79 inode = dst_vma->vm_file->f_inode; in mcopy_atomic_pte()
80 offset = linear_page_index(dst_vma, dst_addr); in mcopy_atomic_pte()
91 page_add_new_anon_rmap(page, dst_vma, dst_addr, false); in mcopy_atomic_pte()
93 lru_cache_add_active_or_unevictable(page, dst_vma); in mcopy_atomic_pte()
98 update_mmu_cache(dst_vma, dst_addr, dst_pte); in mcopy_atomic_pte()
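The hits above are in mcopy_atomic_pte(), the anonymous-memory half of UFFDIO_COPY: it allocates a fresh page against dst_vma, builds a PTE with the VMA's protections (write-enabled only when the VMA itself is writable), guards the shmem MAP_PRIVATE case against mappings past i_size via dst_vma->vm_file, and installs the page with the usual rmap/LRU bookkeeping. A condensed sketch of that flow, reconstructed from the matched lines (locking, error paths, and memcg charging elided; not verbatim kernel source):

    /* allocate the destination page against the target VMA */
    page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);

    /* build the PTE from the VMA's protections */
    _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
    if (dst_vma->vm_flags & VM_WRITE)
        _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

    if (dst_vma->vm_file) {
        /* shmem MAP_PRIVATE: refuse to map beyond the file's i_size */
        inode = dst_vma->vm_file->f_inode;
        offset = linear_page_index(dst_vma, dst_addr);
        /* ... i_size check elided ... */
    }

    /* install: rmap, LRU, PTE, MMU cache */
    page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
    lru_cache_add_active_or_unevictable(page, dst_vma);
    set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
    update_mmu_cache(dst_vma, dst_addr, dst_pte);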
114 struct vm_area_struct *dst_vma, in mfill_zeropage_pte() argument
124 dst_vma->vm_page_prot)); in mfill_zeropage_pte()
126 if (dst_vma->vm_file) { in mfill_zeropage_pte()
128 inode = dst_vma->vm_file->f_inode; in mfill_zeropage_pte()
129 offset = linear_page_index(dst_vma, dst_addr); in mfill_zeropage_pte()
140 update_mmu_cache(dst_vma, dst_addr, dst_pte); in mfill_zeropage_pte()
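mfill_zeropage_pte() is the UFFDIO_ZEROPAGE counterpart: no page is allocated, a read-only special mapping of the shared zero page is installed instead, again taking the protections from dst_vma and applying the same i_size guard for file-backed VMAs. Condensed sketch (locking and error paths elided):

    _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
                                     dst_vma->vm_page_prot));
    if (dst_vma->vm_file) {
        /* same shmem MAP_PRIVATE i_size guard as in mcopy_atomic_pte() */
        inode = dst_vma->vm_file->f_inode;
        offset = linear_page_index(dst_vma, dst_addr);
        /* ... i_size check elided ... */
    }
    set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
    update_mmu_cache(dst_vma, dst_addr, dst_pte);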
174 struct vm_area_struct *dst_vma, in __mcopy_atomic_hugetlb() argument
180 int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
181 int vm_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
208 vma_hpagesize = vma_kernel_pagesize(dst_vma); in __mcopy_atomic_hugetlb()
222 if (!dst_vma) { in __mcopy_atomic_hugetlb()
224 dst_vma = find_vma(dst_mm, dst_start); in __mcopy_atomic_hugetlb()
225 if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) in __mcopy_atomic_hugetlb()
232 if (!dst_vma->vm_userfaultfd_ctx.ctx) in __mcopy_atomic_hugetlb()
235 if (dst_start < dst_vma->vm_start || in __mcopy_atomic_hugetlb()
236 dst_start + len > dst_vma->vm_end) in __mcopy_atomic_hugetlb()
240 if (vma_hpagesize != vma_kernel_pagesize(dst_vma)) in __mcopy_atomic_hugetlb()
243 vm_shared = dst_vma->vm_flags & VM_SHARED; in __mcopy_atomic_hugetlb()
255 if (unlikely(anon_vma_prepare(dst_vma))) in __mcopy_atomic_hugetlb()
259 h = hstate_vma(dst_vma); in __mcopy_atomic_hugetlb()
270 idx = linear_page_index(dst_vma, dst_addr); in __mcopy_atomic_hugetlb()
271 mapping = dst_vma->vm_file->f_mapping; in __mcopy_atomic_hugetlb()
289 err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, in __mcopy_atomic_hugetlb()
310 dst_vma = NULL; in __mcopy_atomic_hugetlb()
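__mcopy_atomic_hugetlb() handles hugetlbfs destinations. The cluster of checks at lines 222-243 sits inside the retry loop: whenever the copy has to drop mmap_sem (to fault in the source page), dst_vma is set to NULL (line 310) and, once the lock is retaken, the VMA must be looked up and fully revalidated before any further work. A condensed sketch of that revalidation, lifted from the matched lines:

    if (!dst_vma) {
        /* mmap_sem was dropped and retaken: the old VMA cannot be trusted */
        dst_vma = find_vma(dst_mm, dst_start);
        if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
            goto out_unlock;
        /* must still be registered with a userfaultfd context */
        if (!dst_vma->vm_userfaultfd_ctx.ctx)
            goto out_unlock;
        /* the whole range must still fall inside this one VMA */
        if (dst_start < dst_vma->vm_start ||
            dst_start + len > dst_vma->vm_end)
            goto out_unlock;
        /* the huge page size must not have changed under us */
        if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
            goto out_unlock;
        vm_shared = dst_vma->vm_flags & VM_SHARED;
    }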
386 struct vm_area_struct *dst_vma,
395 struct vm_area_struct *dst_vma, in mfill_atomic_pte() argument
413 if (!(dst_vma->vm_flags & VM_SHARED)) { in mfill_atomic_pte()
415 err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, in mfill_atomic_pte()
419 dst_vma, dst_addr); in mfill_atomic_pte()
423 dst_vma, dst_addr, in mfill_atomic_pte()
427 dst_vma, dst_addr); in mfill_atomic_pte()
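mfill_atomic_pte() is the per-page dispatcher for the non-hugetlb path: VM_SHARED on dst_vma decides between the anonymous helpers above and their shmem counterparts, and the zeropage flag picks copy versus zero-fill within each pair. A sketch of the dispatch, reconstructed from the matched lines (the shmem helper names, shmem_mcopy_atomic_pte()/shmem_mfill_zeropage_pte(), live in mm/shmem.c in kernels of this vintage):

    if (!(dst_vma->vm_flags & VM_SHARED)) {
        if (!zeropage)
            err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                   dst_addr, src_addr, page);
        else
            err = mfill_zeropage_pte(dst_mm, dst_pmd,
                                     dst_vma, dst_addr);
    } else {
        if (!zeropage)
            err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
                                         dst_vma, dst_addr,
                                         src_addr, page);
        else
            err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
                                           dst_vma, dst_addr);
    }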
440 struct vm_area_struct *dst_vma; in __mcopy_atomic() local
478 dst_vma = find_vma(dst_mm, dst_start); in __mcopy_atomic()
479 if (!dst_vma) in __mcopy_atomic()
486 if (!dst_vma->vm_userfaultfd_ctx.ctx) in __mcopy_atomic()
489 if (dst_start < dst_vma->vm_start || in __mcopy_atomic()
490 dst_start + len > dst_vma->vm_end) in __mcopy_atomic()
498 if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && in __mcopy_atomic()
499 dst_vma->vm_flags & VM_SHARED)) in __mcopy_atomic()
505 if (is_vm_hugetlb_page(dst_vma)) in __mcopy_atomic()
506 return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, in __mcopy_atomic()
509 if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) in __mcopy_atomic()
518 if (!(dst_vma->vm_flags & VM_SHARED) && in __mcopy_atomic()
519 unlikely(anon_vma_prepare(dst_vma))) in __mcopy_atomic()
556 err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, in __mcopy_atomic()
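__mcopy_atomic() is the top-level entry behind the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls, and its use of dst_vma is pure validation before the per-page loop: look the VMA up, require a registered userfaultfd context, require the whole destination range inside that one VMA, reject the nonsensical anonymous+VM_SHARED combination, divert hugetlb VMAs to __mcopy_atomic_hugetlb(), restrict everything else to anonymous or shmem mappings, and prepare the anon_vma for private mappings. For orientation, this is the userspace request that lands here; a minimal sketch, assuming uffd is an open userfaultfd whose registered range covers dst (variable names are illustrative):

    struct uffdio_copy copy = {
        .dst = (unsigned long) dst,      /* page-aligned, inside a registered VMA */
        .src = (unsigned long) src_buf,  /* source buffer in the caller's memory */
        .len = page_size,                /* multiple of the page size */
        .mode = 0,
    };
    if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
        err(1, "UFFDIO_COPY");           /* needs <sys/ioctl.h>, <linux/userfaultfd.h>, <err.h> */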