
Searched refs:vm_userfaultfd_ctx (Results 1 – 11 of 11) sorted by relevance

/Linux-v5.15/include/linux/
userfaultfd_k.h
     76  struct vm_userfaultfd_ctx vm_ctx)  in is_mergeable_vm_userfaultfd_ctx()
     78  return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;  in is_mergeable_vm_userfaultfd_ctx()
    133  struct vm_userfaultfd_ctx *);
    134  extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
    158  struct vm_userfaultfd_ctx vm_ctx)  in is_mergeable_vm_userfaultfd_ctx()
    207  struct vm_userfaultfd_ctx *ctx)  in mremap_userfaultfd_prep()
    211  static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,  in mremap_userfaultfd_complete()
mm_types.h
    304  #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
    305  struct vm_userfaultfd_ctx {  struct
    309  #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})  argument
    310  struct vm_userfaultfd_ctx {};  struct
    387  struct vm_userfaultfd_ctx vm_userfaultfd_ctx;  member
mm.h
   2551  struct mempolicy *, struct vm_userfaultfd_ctx);
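
Read together, these header hits show both halves of the per-VMA hook: mm_types.h declares struct vm_userfaultfd_ctx (a single context pointer, or an empty struct when userfaultfd is compiled out) plus the NULL_VM_UFFD_CTX initializer, and the merge helper compares the embedded pointer when deciding whether two VMAs may coalesce. A minimal reconstruction from the fragments above; the CONFIG_USERFAULTFD guard is inferred from the two variants listed, not visible in the excerpts themselves:

    /* Sketch assembled from the mm_types.h / userfaultfd_k.h excerpts above. */
    #ifdef CONFIG_USERFAULTFD
    #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
    struct vm_userfaultfd_ctx {
    	struct userfaultfd_ctx *ctx;	/* owning userfaultfd context, if registered */
    };
    #else
    #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
    struct vm_userfaultfd_ctx {};	/* zero-size when userfaultfd is compiled out */
    #endif

    /* Two VMAs can only merge when they reference the same (possibly NULL) context. */
    static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
    						   struct vm_userfaultfd_ctx vm_ctx)
    {
    	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
    }
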
/Linux-v5.15/fs/
userfaultfd.c
    396  ctx = vmf->vma->vm_userfaultfd_ctx.ctx;  in handle_userfault()
    613  if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {  in userfaultfd_event_wait_completion()
    614  vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;  in userfaultfd_event_wait_completion()
    645  octx = vma->vm_userfaultfd_ctx.ctx;  in dup_userfaultfd()
    647  vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;  in dup_userfaultfd()
    684  vma->vm_userfaultfd_ctx.ctx = ctx;  in dup_userfaultfd()
    713  struct vm_userfaultfd_ctx *vm_ctx)  in mremap_userfaultfd_prep()
    717  ctx = vma->vm_userfaultfd_ctx.ctx;  in mremap_userfaultfd_prep()
    728  vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;  in mremap_userfaultfd_prep()
    733  void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,  in mremap_userfaultfd_complete()
    [all …]
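
The fs/userfaultfd.c hits cluster around lifecycle events: handle_userfault() reads the per-VMA pointer to find the context that should receive the fault, dup_userfaultfd() decides at fork time whether a child VMA keeps or drops its context, and the mremap_userfaultfd_prep()/mremap_userfaultfd_complete() pair carries the context across a move. A simplified, illustrative sketch of the fork-time decision; the UFFD_FEATURE_EVENT_FORK gate is an assumption taken from the userfaultfd feature API, and the real dup_userfaultfd() additionally allocates a per-fork context and clears the VM_UFFD_* flags:

    /* Simplified sketch of the dup_userfaultfd() pattern visible above
     * (userfaultfd.c:645-684); not the verbatim v5.15 body. */
    static void dup_uffd_ctx_sketch(struct vm_area_struct *vma)
    {
    	struct userfaultfd_ctx *octx = vma->vm_userfaultfd_ctx.ctx;

    	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
    		/* No context, or fork events not requested: child VMA starts clean. */
    		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
    		return;
    	}

    	/* Otherwise the child VMA keeps a context pointer (a fresh per-fork
    	 * context in the real code, reported to the monitoring process). */
    	vma->vm_userfaultfd_ctx.ctx = octx;
    }
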
/Linux-v5.15/mm/
mmap.c
   1032  struct vm_userfaultfd_ctx vm_userfaultfd_ctx)  in is_mergeable_vma()  argument
   1048  if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))  in is_mergeable_vma()
   1082  struct vm_userfaultfd_ctx vm_userfaultfd_ctx)  in can_vma_merge_before()  argument
   1084  if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&  in can_vma_merge_before()
   1103  struct vm_userfaultfd_ctx vm_userfaultfd_ctx)  in can_vma_merge_after()  argument
   1105  if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&  in can_vma_merge_after()
   1163  struct vm_userfaultfd_ctx vm_userfaultfd_ctx)  in vma_merge()  argument
   1193  vm_userfaultfd_ctx)) {  in vma_merge()
   1202  vm_userfaultfd_ctx) &&  in vma_merge()
   1225  vm_userfaultfd_ctx)) {  in vma_merge()
   [all …]
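
In mm/mmap.c the context is threaded through every merge predicate: vma_merge() and can_vma_merge_before()/after() simply pass the value along, and is_mergeable_vma() refuses the merge when the contexts differ. A reduced sketch of that gate, keeping only the checks relevant to this search; the helper name is hypothetical and the real is_mergeable_vma() also looks at vm_ops and other state:

    /* Reduced sketch of the check at mmap.c:1032/1048 above. */
    static bool is_mergeable_vma_sketch(struct vm_area_struct *vma,
    				    struct file *file, unsigned long vm_flags,
    				    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
    {
    	/* Flags must match (soft-dirty is ignored, as in the real code). */
    	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
    		return false;
    	if (vma->vm_file != file)
    		return false;
    	/* VMAs registered with different userfaultfd contexts never merge,
    	 * otherwise faults in the merged range could be reported to the
    	 * wrong (or no) monitor. */
    	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
    		return false;
    	return true;
    }
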
mremap.c
    566  struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)  in move_vma()
    783  unsigned long flags, struct vm_userfaultfd_ctx *uf,  in mremap_to()
    905  struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;  in SYSCALL_DEFINE5()
mlock.c
    514  vma->vm_userfaultfd_ctx);  in mlock_fixup()
mprotect.c
    467  vma->vm_userfaultfd_ctx);  in mprotect_fixup()
userfaultfd.c
     45  if (!dst_vma->vm_userfaultfd_ctx.ctx)  in find_dst_vma()
madvise.c
    141  vma->vm_userfaultfd_ctx);  in madvise_behavior()
mempolicy.c
    813  new_pol, vma->vm_userfaultfd_ctx);  in mbind_range()
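
The remaining hits are call sites: mlock_fixup(), mprotect_fixup(), madvise_behavior() and mbind_range() all forward the existing VMA's context as the last argument of vma_merge(), matching the prototype shown at mm.h:2551. A sketch of such a call site, after the mlock.c/mprotect.c excerpts above; the surrounding locals (mm, prev, start, end, newflags, pgoff) are assumed from that context:

    	/* Typical vma_merge() call as at mlock.c:514 / mprotect.c:467: the
    	 * current VMA's userfaultfd context is passed through unchanged, so
    	 * the merge only succeeds against VMAs with the same registration. */
    	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
    	*prev = vma_merge(mm, *prev, start, end, newflags,
    			  vma->anon_vma, vma->vm_file, pgoff,
    			  vma_policy(vma), vma->vm_userfaultfd_ctx);
    	if (*prev)
    		vma = *prev;
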