Searched refs:vm_userfaultfd_ctx (Results 1 – 11 of 11) sorted by relevance

/Linux-v5.4/include/linux/
userfaultfd_k.h
45 struct vm_userfaultfd_ctx vm_ctx) in is_mergeable_vm_userfaultfd_ctx()
47 return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; in is_mergeable_vm_userfaultfd_ctx()
64 struct vm_userfaultfd_ctx *);
65 extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
89 struct vm_userfaultfd_ctx vm_ctx) in is_mergeable_vm_userfaultfd_ctx()
115 struct vm_userfaultfd_ctx *ctx) in mremap_userfaultfd_prep()
119 static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, in mremap_userfaultfd_complete()
mm_types.h
277 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
278 struct vm_userfaultfd_ctx { struct
282 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {}) argument
283 struct vm_userfaultfd_ctx {}; struct
355 struct vm_userfaultfd_ctx vm_userfaultfd_ctx; member
mm.h
2276 struct mempolicy *, struct vm_userfaultfd_ctx);
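
The mm_types.h hits above are the definition site: with CONFIG_USERFAULTFD enabled, struct vm_userfaultfd_ctx is a one-pointer wrapper around a struct userfaultfd_ctx *, NULL_VM_UFFD_CTX is the empty compound literal, and every vm_area_struct embeds one such member (line 355); without the config option the struct is empty. The userfaultfd_k.h hits are the mergeability helper, which simply compares the two ctx pointers. A minimal standalone sketch of that layout and check, using a stand-in userfaultfd_ctx body and a trimmed vm_area_struct rather than the real kernel types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the real context object defined in fs/userfaultfd.c. */
struct userfaultfd_ctx { int dummy; };

/* Mirrors the CONFIG_USERFAULTFD=y layout seen in the mm_types.h hits:
 * a single context pointer embedded in each VMA. */
struct vm_userfaultfd_ctx {
    struct userfaultfd_ctx *ctx;
};
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })

/* Trimmed-down vm_area_struct: only the member this search is about. */
struct vm_area_struct {
    struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};

/* Same test as is_mergeable_vm_userfaultfd_ctx() in userfaultfd_k.h:
 * two ranges can only merge if they point at the same uffd context
 * (or both at none). */
static bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
                                            struct vm_userfaultfd_ctx vm_ctx)
{
    return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

int main(void)
{
    struct userfaultfd_ctx uffd;
    struct vm_area_struct registered = { .vm_userfaultfd_ctx = { &uffd } };
    struct vm_area_struct plain = { .vm_userfaultfd_ctx = NULL_VM_UFFD_CTX };

    printf("registered vs unregistered: %d\n",
           is_mergeable_vm_userfaultfd_ctx(&registered,
                                           plain.vm_userfaultfd_ctx));
    printf("unregistered vs NULL ctx:   %d\n",
           is_mergeable_vm_userfaultfd_ctx(&plain, NULL_VM_UFFD_CTX));
    return 0;
}

Run standalone, this prints 0 for the registered-vs-unregistered pair and 1 for the matching pair, which is the property the mm/mmap.c hits below rely on.
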
/Linux-v5.4/fs/
userfaultfd.c
381 ctx = vmf->vma->vm_userfaultfd_ctx.ctx; in handle_userfault()
645 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { in userfaultfd_event_wait_completion()
646 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; in userfaultfd_event_wait_completion()
676 octx = vma->vm_userfaultfd_ctx.ctx; in dup_userfaultfd()
678 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; in dup_userfaultfd()
716 vma->vm_userfaultfd_ctx.ctx = ctx; in dup_userfaultfd()
745 struct vm_userfaultfd_ctx *vm_ctx) in mremap_userfaultfd_prep()
749 ctx = vma->vm_userfaultfd_ctx.ctx; in mremap_userfaultfd_prep()
760 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; in mremap_userfaultfd_prep()
765 void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx, in mremap_userfaultfd_complete()
[all …]
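
Within fs/userfaultfd.c, the hits trace the life of the per-VMA pointer: handle_userfault() reads it on a fault, dup_userfaultfd() decides at fork whether the child VMA keeps the context or gets NULL_VM_UFFD_CTX, and the mremap pair saves it into a caller-owned struct vm_userfaultfd_ctx before the move (mremap_userfaultfd_prep()) so the move can be reported afterwards (mremap_userfaultfd_complete()). Below is only a loose, standalone illustration of that save-then-notify shape; the helper names are invented, and the unconditional hand-off plus the printf stand in for the real reference counting, feature checks and UFFD_EVENT_REMAP delivery:

#include <stddef.h>
#include <stdio.h>

struct userfaultfd_ctx { const char *name; };   /* stand-in, not the kernel type */
struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; };
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })

struct vm_area_struct { struct vm_userfaultfd_ctx vm_userfaultfd_ctx; };

/* Loosely modeled on mremap_userfaultfd_prep(): stash the VMA's context
 * in a caller-owned temporary before the pages move.  The real code also
 * takes a reference on the context and, depending on its feature flags,
 * may instead drop it from the VMA with NULL_VM_UFFD_CTX. */
static void uffd_mremap_prep(struct vm_area_struct *vma,
                             struct vm_userfaultfd_ctx *vm_ctx)
{
    *vm_ctx = vma->vm_userfaultfd_ctx;
    vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}

/* Loosely modeled on mremap_userfaultfd_complete(): after the move,
 * report the new range against the saved context.  The real kernel
 * queues a UFFD_EVENT_REMAP message instead of printing. */
static void uffd_mremap_complete(struct vm_userfaultfd_ctx *vm_ctx,
                                 unsigned long from, unsigned long to,
                                 unsigned long len)
{
    if (!vm_ctx->ctx)
        return;
    printf("%s: range moved 0x%lx -> 0x%lx (%lu bytes)\n",
           vm_ctx->ctx->name, from, to, len);
}

int main(void)
{
    struct userfaultfd_ctx uffd = { "uffd0" };
    struct vm_area_struct vma = { { &uffd } };
    struct vm_userfaultfd_ctx saved = NULL_VM_UFFD_CTX;

    uffd_mremap_prep(&vma, &saved);
    uffd_mremap_complete(&saved, 0x1000UL, 0x9000UL, 0x4000UL);
    return 0;
}
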
/Linux-v5.4/mm/
mmap.c
1020 struct vm_userfaultfd_ctx vm_userfaultfd_ctx) in is_mergeable_vma() argument
1036 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) in is_mergeable_vma()
1070 struct vm_userfaultfd_ctx vm_userfaultfd_ctx) in can_vma_merge_before() argument
1072 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && in can_vma_merge_before()
1091 struct vm_userfaultfd_ctx vm_userfaultfd_ctx) in can_vma_merge_after() argument
1093 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && in can_vma_merge_after()
1148 struct vm_userfaultfd_ctx vm_userfaultfd_ctx) in vma_merge() argument
1181 vm_userfaultfd_ctx)) { in vma_merge()
1190 vm_userfaultfd_ctx) && in vma_merge()
1213 vm_userfaultfd_ctx)) { in vma_merge()
[all …]
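
The mmap.c hits show why the context matters for VMA merging: vma_merge() threads a struct vm_userfaultfd_ctx argument down through can_vma_merge_before()/can_vma_merge_after() into is_mergeable_vma(), so two neighbouring VMAs are only coalesced when their userfaultfd registration matches. A cut-down model of that gate; the vm_flags/vm_file handling and the helper bodies here are simplified stand-ins, not the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct userfaultfd_ctx { int dummy; };          /* stand-in */
struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; };

struct file;                                    /* opaque, pointer-compared only */
struct vm_area_struct {
    unsigned long vm_flags;
    struct file *vm_file;
    struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};

static bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
                                            struct vm_userfaultfd_ctx vm_ctx)
{
    return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/* Cut-down model of the mm/mmap.c gate: flags, backing file and
 * userfaultfd registration must all match before a merge is considered.
 * (The real helper also ignores VM_SOFTDIRTY in the flags comparison and
 * refuses VMAs that have a vm_ops->close handler.) */
static bool is_mergeable_vma(struct vm_area_struct *vma, struct file *file,
                             unsigned long vm_flags,
                             struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
    if (vma->vm_flags != vm_flags)
        return false;
    if (vma->vm_file != file)
        return false;
    if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
        return false;
    return true;
}

int main(void)
{
    struct userfaultfd_ctx uffd;
    /* Arbitrary demo flags; no backing file on either side. */
    struct vm_area_struct prev = { 0x73, NULL, { &uffd } };

    /* Same flags and file, but the new range is not registered with the
     * same userfaultfd context, so the merge is refused. */
    printf("merge allowed: %d\n",
           is_mergeable_vma(&prev, NULL, 0x73,
                            (struct vm_userfaultfd_ctx){ NULL }));
    return 0;
}
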
mremap.c
321 bool *locked, struct vm_userfaultfd_ctx *uf, in move_vma()
500 struct vm_userfaultfd_ctx *uf, in mremap_to()
605 struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; in SYSCALL_DEFINE5()
userfaultfd.c
232 if (!dst_vma->vm_userfaultfd_ctx.ctx) in __mcopy_atomic_hugetlb()
486 if (!dst_vma->vm_userfaultfd_ctx.ctx) in __mcopy_atomic()
mprotect.c
397 vma->vm_userfaultfd_ctx); in mprotect_fixup()
mlock.c
538 vma->vm_userfaultfd_ctx); in mlock_fixup()
madvise.c
137 vma->vm_userfaultfd_ctx); in madvise_behavior()
mempolicy.c
762 new_pol, vma->vm_userfaultfd_ctx); in mbind_range()
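
The remaining mm/ hits fall into two patterns: mm/userfaultfd.c (the UFFDIO_COPY/UFFDIO_ZEROPAGE paths) refuses to operate on a destination VMA whose vm_userfaultfd_ctx.ctx is NULL, i.e. a range that is not actually registered, while mprotect_fixup(), mlock_fixup(), madvise_behavior() and mbind_range() simply forward vma->vm_userfaultfd_ctx into vma_merge() so the merge gate sketched above keeps holding across those fixups. A tiny sketch of the first pattern; the helper name and the -ENOENT return value are illustrative assumptions, not the kernel function:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct userfaultfd_ctx { int dummy; };          /* stand-in */
struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; };
struct vm_area_struct { struct vm_userfaultfd_ctx vm_userfaultfd_ctx; };

/* Shape of the guard at mm/userfaultfd.c:232 and :486: the copy only
 * proceeds when the destination VMA is actually registered with a
 * userfaultfd context. */
static int check_dst_registered(struct vm_area_struct *dst_vma)
{
    if (!dst_vma->vm_userfaultfd_ctx.ctx)
        return -ENOENT;
    return 0;
}

int main(void)
{
    struct vm_area_struct unregistered = { { NULL } };
    printf("unregistered destination -> %d\n",
           check_dst_registered(&unregistered));
    return 0;
}
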