Searched refs:dst_vma (Results 1 – 10 of 10) sorted by relevance
/Linux-v5.15/mm/
userfaultfd.c
    30  struct vm_area_struct *dst_vma;                  in find_dst_vma() local
    32  dst_vma = find_vma(dst_mm, dst_start);           in find_dst_vma()
    33  if (!dst_vma)                                    in find_dst_vma()
    36  if (dst_start < dst_vma->vm_start ||             in find_dst_vma()
    37      dst_start + len > dst_vma->vm_end)           in find_dst_vma()
    45  if (!dst_vma->vm_userfaultfd_ctx.ctx)            in find_dst_vma()
    48  return dst_vma;                                  in find_dst_vma()
    58  struct vm_area_struct *dst_vma,                  in mfill_atomic_install_pte() argument
    64  bool writable = dst_vma->vm_flags & VM_WRITE;    in mfill_atomic_install_pte()
    65  bool vm_shared = dst_vma->vm_flags & VM_SHARED;  in mfill_atomic_install_pte()
    [all …]
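Taken together, these hits cover nearly all of find_dst_vma(). A minimal reconstruction of the lookup-and-validate pattern, assuming the error paths truncated from the results simply return NULL:

static struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
                                           unsigned long dst_start,
                                           unsigned long len)
{
        struct vm_area_struct *dst_vma;

        /* Caller is assumed to hold dst_mm->mmap_lock. */
        dst_vma = find_vma(dst_mm, dst_start);
        if (!dst_vma)
                return NULL;

        /*
         * find_vma() only guarantees dst_start < vm_end, so check that
         * the whole [dst_start, dst_start + len) range fits in this VMA.
         */
        if (dst_start < dst_vma->vm_start ||
            dst_start + len > dst_vma->vm_end)
                return NULL;

        /* Only VMAs registered with a userfaultfd context are valid targets. */
        if (!dst_vma->vm_userfaultfd_ctx.ctx)
                return NULL;

        return dst_vma;
}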
memory.c
   772  pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,     in copy_nonpresent_pte() argument
   775  unsigned long vm_flags = dst_vma->vm_flags;                         in copy_nonpresent_pte()
   857  if (!userfaultfd_wp(dst_vma))                                       in copy_nonpresent_pte()
   884  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  in copy_present_page() argument
   917  page_add_new_anon_rmap(new_page, dst_vma, addr, false);             in copy_present_page()
   918  lru_cache_add_inactive_or_unevictable(new_page, dst_vma);           in copy_present_page()
   922  pte = mk_pte(new_page, dst_vma->vm_page_prot);                      in copy_present_page()
   923  pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);                     in copy_present_page()
   924  if (userfaultfd_pte_wp(dst_vma, *src_pte))                          in copy_present_page()
   927  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);                     in copy_present_page()
   [all …]
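The copy_present_page() hits (lines 917–927) show the canonical sequence for installing a freshly copied anonymous page during fork. A sketch of that tail, with the page allocation, copy, and rss accounting elided, and the truncated body of the line-924 branch assumed to write-protect and uffd-wp-mark the destination pte:

        /* Publish new_page as an anonymous page in the child (dst_vma). */
        page_add_new_anon_rmap(new_page, dst_vma, addr, false);
        lru_cache_add_inactive_or_unevictable(new_page, dst_vma);

        /* Build the child's pte: dirty, writable only if the VMA allows it. */
        pte = mk_pte(new_page, dst_vma->vm_page_prot);
        pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
        if (userfaultfd_pte_wp(dst_vma, *src_pte))
                /* Assumed: uffd-wp must be delivered to the dest pte as well. */
                pte = pte_wrprotect(pte_mkuffd_wp(pte));
        set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);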
hugetlb.c
  5222  struct vm_area_struct *dst_vma,                                in hugetlb_mcopy_atomic_pte() argument
  5229  struct hstate *h = hstate_vma(dst_vma);                        in hugetlb_mcopy_atomic_pte()
  5230  struct address_space *mapping = dst_vma->vm_file->f_mapping;   in hugetlb_mcopy_atomic_pte()
  5231  pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);      in hugetlb_mcopy_atomic_pte()
  5233  int vm_shared = dst_vma->vm_flags & VM_SHARED;                 in hugetlb_mcopy_atomic_pte()
  5251  hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {           in hugetlb_mcopy_atomic_pte()
  5256  page = alloc_huge_page(dst_vma, dst_addr, 0);                  in hugetlb_mcopy_atomic_pte()
  5272  restore_reserve_on_error(h, dst_vma, dst_addr, page);          in hugetlb_mcopy_atomic_pte()
  5278  page = alloc_huge_page_vma(h, dst_vma, dst_addr);              in hugetlb_mcopy_atomic_pte()
  5292  hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {           in hugetlb_mcopy_atomic_pte()
  [all …]
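In hugetlb_mcopy_atomic_pte(), dst_vma first drives everything needed to address the huge-page cache; lines 5229–5233 amount to this setup (the comments are interpretation, not source):

        struct hstate *h = hstate_vma(dst_vma);            /* huge page size/pool for this VMA */
        struct address_space *mapping = dst_vma->vm_file->f_mapping;
        pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);  /* cache slot of dst_addr */
        int vm_shared = dst_vma->vm_flags & VM_SHARED;     /* shared mappings also fill the cache */

The later hits (lines 5251 and 5292) then consult hugetlbfs_pagecache_present() to decide whether a page already backs that slot before allocating one via alloc_huge_page() or alloc_huge_page_vma().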
huge_memory.c
  1029  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)  in copy_huge_pmd() argument
  1038  if (!vma_is_anonymous(dst_vma))                                   in copy_huge_pmd()
  1070  if (!userfaultfd_wp(dst_vma))                                     in copy_huge_pmd()
  1122  if (!userfaultfd_wp(dst_vma))                                     in copy_huge_pmd()
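Both userfaultfd_wp(dst_vma) hits in copy_huge_pmd() express the same rule: if the destination VMA is not registered for uffd write-protection, the uffd-wp marker must not be copied into the child. A sketch of the two sites, with the truncated branch bodies assumed to clear the marker using the v5.15 pmd helpers (swap-entry and present variants):

        /* Migration-entry path (around the line-1070 hit). */
        if (!userfaultfd_wp(dst_vma))
                pmd = pmd_swp_clear_uffd_wp(pmd);

        /* Present huge-pmd path (around the line-1122 hit). */
        if (!userfaultfd_wp(dst_vma))
                pmd = pmd_clear_uffd_wp(pmd);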
shmem.c
  2343  struct vm_area_struct *dst_vma,                                      in shmem_mfill_atomic_pte() argument
  2349  struct inode *inode = file_inode(dst_vma->vm_file);                  in shmem_mfill_atomic_pte()
  2353  pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);                in shmem_mfill_atomic_pte()
  2416  ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,   in shmem_mfill_atomic_pte()
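shmem_mfill_atomic_pte() uses dst_vma only to locate the backing file and offset, then defers pte installation to the common helper from userfaultfd.c (the mfill_atomic_install_pte() hit in the first entry). Pieced together from the hits, with the allocation/copy steps and the helper's trailing arguments assumed:

        struct inode *inode = file_inode(dst_vma->vm_file);    /* shmem inode behind the VMA */
        pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);  /* file offset of dst_addr */

        /* ... allocate a shmem page, copy or zero it, add it to the page cache ... */

        /* Line 2416: install the pte; trailing arguments are assumed here. */
        ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
                                       page, true /* newly allocated */, false /* !wp_copy */);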
/Linux-v5.15/include/linux/
shmem_fs.h
   145  struct vm_area_struct *dst_vma,
   151  #define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \  argument
userfaultfd_k.h
    57  struct vm_area_struct *dst_vma,
hugetlb.h
   157  struct vm_area_struct *dst_vma,
   338  struct vm_area_struct *dst_vma,  in hugetlb_mcopy_atomic_pte() argument
huge_mm.h
    13  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
mm.h
  1747  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);