Searched refs:mmap_lock (Results 1 – 22 of 22) sorted by relevance
12  .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
65  init_rwsem(&mm->mmap_lock);  in mmap_init_lock()
71  down_write(&mm->mmap_lock);  in mmap_write_lock()
78  down_write_nested(&mm->mmap_lock, subclass);  in mmap_write_lock_nested()
87  ret = down_write_killable(&mm->mmap_lock);  in mmap_write_lock_killable()
97  ret = down_write_trylock(&mm->mmap_lock) != 0;  in mmap_write_trylock()
105  up_write(&mm->mmap_lock);  in mmap_write_unlock()
111  downgrade_write(&mm->mmap_lock);  in mmap_write_downgrade()
117  down_read(&mm->mmap_lock);  in mmap_read_lock()
126  ret = down_read_killable(&mm->mmap_lock);  in mmap_read_lock_killable()
[all …]
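The hits above are the wrapper helpers that replace open-coded rwsem calls on mm->mmap_lock. As a rough illustration (not part of the results), kernel code might pair the read-side helpers around a VMA lookup like this; the helper name addr_is_mapped() is made up for the sketch:

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    /* Illustrative only: check whether @addr falls inside some VMA of @mm,
     * holding the mmap_lock for read so the VMA tree cannot change under us. */
    static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool mapped;

            mmap_read_lock(mm);                     /* down_read(&mm->mmap_lock) */
            vma = find_vma(mm, addr);               /* valid only while locked */
            mapped = vma && addr >= vma->vm_start;
            mmap_read_unlock(mm);                   /* up_read(&mm->mmap_lock) */

            return mapped;
    }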
473 struct rw_semaphore mmap_lock; member
526  spinlock_t mmap_lock;  member
549  spin_lock(&ucontext->mmap_lock);  in remove_mmap()
555  spin_unlock(&ucontext->mmap_lock);  in remove_mmap()
561  spin_unlock(&ucontext->mmap_lock);  in remove_mmap()
568  spin_lock(&ucontext->mmap_lock);  in insert_mmap()
572  spin_unlock(&ucontext->mmap_lock);  in insert_mmap()
88  spin_lock_init(&context->mmap_lock);  in c4iw_alloc_ucontext()
102  spin_lock(&context->mmap_lock);  in c4iw_alloc_ucontext()
105  spin_unlock(&context->mmap_lock);  in c4iw_alloc_ucontext()
1109  spin_lock(&ucontext->mmap_lock);  in c4iw_create_cq()
1119  spin_unlock(&ucontext->mmap_lock);  in c4iw_create_cq()
2262  spin_lock(&ucontext->mmap_lock);  in c4iw_create_qp()
2279  spin_unlock(&ucontext->mmap_lock);  in c4iw_create_qp()
2753  spin_lock(&ucontext->mmap_lock);  in c4iw_create_srq()
2758  spin_unlock(&ucontext->mmap_lock);  in c4iw_create_srq()
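Taken together, the cxgb4 hits above show a per-ucontext spinlock protecting the driver's table of pending mmap entries (insert_mmap()/remove_mmap()) and the key updates in the create paths. A minimal sketch of that pattern, with made-up struct and field names apart from mmap_lock:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_ucontext {
            spinlock_t mmap_lock;           /* guards the mmaps list */
            struct list_head mmaps;         /* pending entries, looked up at mmap() time */
    };

    static void example_insert_mmap(struct example_ucontext *uctx,
                                    struct list_head *entry)
    {
            spin_lock(&uctx->mmap_lock);
            list_add_tail(entry, &uctx->mmaps);
            spin_unlock(&uctx->mmap_lock);
    }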
759  mutex_lock(&q->mmap_lock);  in vb2_core_reqbufs()
771  mutex_unlock(&q->mmap_lock);  in vb2_core_reqbufs()
849  mutex_lock(&q->mmap_lock);  in vb2_core_reqbufs()
858  mutex_unlock(&q->mmap_lock);  in vb2_core_reqbufs()
861  mutex_unlock(&q->mmap_lock);  in vb2_core_reqbufs()
949  mutex_lock(&q->mmap_lock);  in vb2_core_create_bufs()
958  mutex_unlock(&q->mmap_lock);  in vb2_core_create_bufs()
961  mutex_unlock(&q->mmap_lock);  in vb2_core_create_bufs()
2246  mutex_lock(&q->mmap_lock);  in vb2_mmap()
2286  mutex_unlock(&q->mmap_lock);  in vb2_mmap()
[all …]
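The videobuf2 hits show the same idea with a mutex: q->mmap_lock is held both while vb2_core_reqbufs()/vb2_core_create_bufs() (re)allocate the buffer queue and while vb2_mmap() maps a buffer, so the two cannot race. A rough sketch of the mmap side, with illustrative names apart from mmap_lock:

    #include <linux/mm_types.h>
    #include <linux/mutex.h>

    struct example_queue {
            struct mutex mmap_lock;         /* serialises (re)allocation vs. mmap */
            /* ... buffer state ... */
    };

    static int example_mmap(struct example_queue *q, struct vm_area_struct *vma)
    {
            int ret;

            mutex_lock(&q->mmap_lock);
            ret = 0;                        /* look up and map the buffer backing @vma here */
            mutex_unlock(&q->mmap_lock);
            return ret;
    }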
3 #define TRACE_SYSTEM mmap_lock
79 prefetchw(&mm->mmap_lock); in ia64_do_page_fault()
55 debug.o gup.o mmap_lock.o $(mmu-y)
1003 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
3511  down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);  in vm_lock_anon_vma()
3541  down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);  in vm_lock_mapping()
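These two hits are the nest-lock annotations used when every anon_vma and address_space lock in an mm is taken in one sweep (mm_take_all_locks()): down_write_nest_lock() tells lockdep that each rwsem is nested under the already-held mmap_lock rather than being lock recursion. A rough sketch of the shape, not a verbatim copy:

    #include <linux/mm_types.h>
    #include <linux/rmap.h>

    /* Caller already holds mm->mmap_lock for write. */
    static void example_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
    {
            down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
    }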
2718  might_lock_read(&current->mm->mmap_lock);  in internal_get_user_pages_fast()
5271  might_lock_read(&current->mm->mmap_lock);  in __might_fault()
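The might_lock_read() hits (here and in the GUP fast path above) are lockdep-only annotations: they take no lock themselves, but declare that the code may take mmap_lock for read, so a caller holding it incompatibly is flagged even when the slow path is never exercised. Illustrative use, with an invented function name:

    #include <linux/lockdep.h>
    #include <linux/mm_types.h>
    #include <linux/sched.h>

    static void example_may_fault_user(void)
    {
            might_lock_read(&current->mm->mmap_lock);
            /* ... access user memory; a fault here could take mmap_lock for read ... */
    }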
603 struct mutex mmap_lock; member
96  mmap_lock in read (or write) mode to be sure a huge pmd cannot be
98  takes the mmap_lock in write mode in addition to the anon_vma lock). If
374  task's mm's mmap_lock for read during the query. The set_mempolicy() and
375  mbind() APIs [see below] always acquire the mmap_lock for write when
381  we hold them mmap_lock for read. Again, because replacing the task or vma
382  policy requires that the mmap_lock be held for write, the policy can't be
386  shared memory policy while another task, with a distinct mmap_lock, is
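The memory-policy documentation hits describe a reader/writer split: queries take mmap_lock for read, while set_mempolicy()/mbind() must take it for write to replace a policy, so a policy pointer read under the read lock stays valid for that duration. A sketch of such a query, with an invented helper name and assuming CONFIG_NUMA so vma->vm_policy exists:

    #include <linux/mempolicy.h>
    #include <linux/mm.h>

    static bool example_vma_has_policy(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool has_policy = false;

            mmap_read_lock(mm);
            vma = find_vma(mm, addr);
            if (vma)
                    has_policy = vma->vm_policy != NULL;    /* stable while read-locked */
            mmap_read_unlock(mm);

            return has_policy;
    }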
36 ``userfaultfd`` runtime load never takes the mmap_lock for writing).
216  rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);  in stack_map_get_build_id_offset()
666  might_lock_read(&current->mm->mmap_lock);  in etnaviv_gem_userptr_get_pages()
1502  prefetchw(&current->mm->mmap_lock);  in DEFINE_IDTENTRY_RAW_ERRORCODE()
644 ops mmap_lock PageLocked(page)