Home
last modified time | relevance | path

Searched refs:mmap_lock (Results 1 – 19 of 19) sorted by relevance

/Linux-v5.10/include/linux/
Dmmap_lock.h7 .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
11 init_rwsem(&mm->mmap_lock); in mmap_init_lock()
16 down_write(&mm->mmap_lock); in mmap_write_lock()
21 down_write_nested(&mm->mmap_lock, subclass); in mmap_write_lock_nested()
26 return down_write_killable(&mm->mmap_lock); in mmap_write_lock_killable()
31 return down_write_trylock(&mm->mmap_lock) != 0; in mmap_write_trylock()
36 up_write(&mm->mmap_lock); in mmap_write_unlock()
41 downgrade_write(&mm->mmap_lock); in mmap_write_downgrade()
46 down_read(&mm->mmap_lock); in mmap_read_lock()
51 return down_read_killable(&mm->mmap_lock); in mmap_read_lock_killable()
[all …]
Dmm_types.h457 struct rw_semaphore mmap_lock; member
/Linux-v5.10/drivers/infiniband/hw/cxgb4/
Diw_cxgb4.h531 spinlock_t mmap_lock; member
554 spin_lock(&ucontext->mmap_lock); in remove_mmap()
560 spin_unlock(&ucontext->mmap_lock); in remove_mmap()
566 spin_unlock(&ucontext->mmap_lock); in remove_mmap()
573 spin_lock(&ucontext->mmap_lock); in insert_mmap()
577 spin_unlock(&ucontext->mmap_lock); in insert_mmap()
Dprovider.c88 spin_lock_init(&context->mmap_lock); in c4iw_alloc_ucontext()
102 spin_lock(&context->mmap_lock); in c4iw_alloc_ucontext()
105 spin_unlock(&context->mmap_lock); in c4iw_alloc_ucontext()
Dcq.c1100 spin_lock(&ucontext->mmap_lock); in c4iw_create_cq()
1110 spin_unlock(&ucontext->mmap_lock); in c4iw_create_cq()
Dqp.c2271 spin_lock(&ucontext->mmap_lock); in c4iw_create_qp()
2288 spin_unlock(&ucontext->mmap_lock); in c4iw_create_qp()
2758 spin_lock(&ucontext->mmap_lock); in c4iw_create_srq()
2763 spin_unlock(&ucontext->mmap_lock); in c4iw_create_srq()
/Linux-v5.10/drivers/media/common/videobuf2/
Dvideobuf2-core.c759 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs()
771 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
849 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs()
858 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
861 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
949 mutex_lock(&q->mmap_lock); in vb2_core_create_bufs()
958 mutex_unlock(&q->mmap_lock); in vb2_core_create_bufs()
961 mutex_unlock(&q->mmap_lock); in vb2_core_create_bufs()
2235 mutex_lock(&q->mmap_lock); in vb2_mmap()
2275 mutex_unlock(&q->mmap_lock); in vb2_mmap()
[all …]
/Linux-v5.10/arch/ia64/mm/
Dfault.c79 prefetchw(&mm->mmap_lock); in ia64_do_page_fault()
/Linux-v5.10/include/media/
Dvideobuf2-core.h600 struct mutex mmap_lock; member
/Linux-v5.10/Documentation/vm/
Dtranshuge.rst101 mmap_lock in read (or write) mode to be sure a huge pmd cannot be
103 takes the mmap_lock in write mode in addition to the anon_vma lock). If
/Linux-v5.10/Documentation/admin-guide/mm/
Dnuma_memory_policy.rst367 task's mm's mmap_lock for read during the query. The set_mempolicy() and
368 mbind() APIs [see below] always acquire the mmap_lock for write when
374 we hold the mmap_lock for read. Again, because replacing the task or vma
375 policy requires that the mmap_lock be held for write, the policy can't be
379 shared memory policy while another task, with a distinct mmap_lock, is
Duserfaultfd.rst36 ``userfaultfd`` runtime load never takes the mmap_lock for writing).
/Linux-v5.10/drivers/gpu/drm/etnaviv/
Detnaviv_gem.c663 might_lock_read(&current->mm->mmap_lock); in etnaviv_gem_userptr_get_pages()
/Linux-v5.10/mm/
Dmmu_notifier.c987 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
Dmmap.c3548 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); in vm_lock_anon_vma()
3578 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); in vm_lock_mapping()
Dgup.c2697 might_lock_read(&current->mm->mmap_lock); in internal_get_user_pages_fast()
Dmemory.c5033 might_lock_read(&current->mm->mmap_lock); in __might_fault()
/Linux-v5.10/arch/x86/mm/
Dfault.c1446 prefetchw(&current->mm->mmap_lock); in DEFINE_IDTENTRY_RAW_ERRORCODE()
/Linux-v5.10/Documentation/filesystems/
Dlocking.rst615 ops mmap_lock PageLocked(page)