Searched refs:mmap_lock (Results 1 – 19 of 19) sorted by relevance
7 .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
11 init_rwsem(&mm->mmap_lock); in mmap_init_lock()
16 down_write(&mm->mmap_lock); in mmap_write_lock()
21 down_write_nested(&mm->mmap_lock, subclass); in mmap_write_lock_nested()
26 return down_write_killable(&mm->mmap_lock); in mmap_write_lock_killable()
31 return down_write_trylock(&mm->mmap_lock) != 0; in mmap_write_trylock()
36 up_write(&mm->mmap_lock); in mmap_write_unlock()
41 downgrade_write(&mm->mmap_lock); in mmap_write_downgrade()
46 down_read(&mm->mmap_lock); in mmap_read_lock()
51 return down_read_killable(&mm->mmap_lock); in mmap_read_lock_killable()
[all …]
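The first group of hits is the generic wrapper API (the mmap_read_lock()/mmap_write_lock() family) that callers are expected to use instead of operating on the rw_semaphore directly. As a minimal sketch of the usual read-side pattern, assuming a hypothetical helper name (the locking calls and find_vma() are the real kernel APIs shown above):

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/*
 * Illustrative only: report whether the VMA covering @addr is writable.
 * The helper name is hypothetical.
 */
static bool vma_is_writable_example(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool writable = false;

	mmap_read_lock(mm);			/* down_read(&mm->mmap_lock) */
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start)	/* find_vma() may return the next VMA */
		writable = vma->vm_flags & VM_WRITE;
	mmap_read_unlock(mm);			/* up_read(&mm->mmap_lock) */

	return writable;
}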
457 struct rw_semaphore mmap_lock; member
531 spinlock_t mmap_lock; member
554 spin_lock(&ucontext->mmap_lock); in remove_mmap()
560 spin_unlock(&ucontext->mmap_lock); in remove_mmap()
566 spin_unlock(&ucontext->mmap_lock); in remove_mmap()
573 spin_lock(&ucontext->mmap_lock); in insert_mmap()
577 spin_unlock(&ucontext->mmap_lock); in insert_mmap()
88 spin_lock_init(&context->mmap_lock); in c4iw_alloc_ucontext()
102 spin_lock(&context->mmap_lock); in c4iw_alloc_ucontext()
105 spin_unlock(&context->mmap_lock); in c4iw_alloc_ucontext()
1100 spin_lock(&ucontext->mmap_lock); in c4iw_create_cq()
1110 spin_unlock(&ucontext->mmap_lock); in c4iw_create_cq()
2271 spin_lock(&ucontext->mmap_lock); in c4iw_create_qp()
2288 spin_unlock(&ucontext->mmap_lock); in c4iw_create_qp()
2758 spin_lock(&ucontext->mmap_lock); in c4iw_create_srq()
2763 spin_unlock(&ucontext->mmap_lock); in c4iw_create_srq()
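Note that the c4iw_* hits refer to a driver-private spinlock that merely shares the name mmap_lock: it protects a per-ucontext table of mmap keys handed out to userspace, not the mm's address space. A rough sketch of that pattern, with purely illustrative structure and function names, might look like:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_ucontext {
	spinlock_t mmap_lock;		/* protects mmaps list and next_key */
	struct list_head mmaps;
	u32 next_key;
};

struct example_mm_entry {
	struct list_head entry;
	u32 key;			/* offset userspace will pass to mmap() */
	u64 addr;			/* resource backing the mapping */
};

/* Assumes mmap_lock and mmaps were initialised when the ucontext was created. */
static int example_insert_mmap(struct example_ucontext *uctx, u64 addr)
{
	struct example_mm_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;

	e->addr = addr;
	spin_lock(&uctx->mmap_lock);
	e->key = uctx->next_key;
	uctx->next_key += PAGE_SIZE;	/* one key per mappable page */
	list_add_tail(&e->entry, &uctx->mmaps);
	spin_unlock(&uctx->mmap_lock);
	return 0;
}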
759 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs()
771 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
849 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs()
858 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
861 mutex_unlock(&q->mmap_lock); in vb2_core_reqbufs()
949 mutex_lock(&q->mmap_lock); in vb2_core_create_bufs()
958 mutex_unlock(&q->mmap_lock); in vb2_core_create_bufs()
961 mutex_unlock(&q->mmap_lock); in vb2_core_create_bufs()
2235 mutex_lock(&q->mmap_lock); in vb2_mmap()
2275 mutex_unlock(&q->mmap_lock); in vb2_mmap()
[all …]
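The videobuf2 hits are another unrelated lock with the same name: a mutex that serializes buffer (re)allocation in vb2_core_reqbufs()/vb2_core_create_bufs() against vb2_mmap(). The shape of that usage, with illustrative types and names only, is roughly:

#include <linux/mutex.h>

struct example_queue {
	struct mutex mmap_lock;		/* serializes buffer setup/teardown vs. mmap */
	unsigned int num_buffers;
};

static int example_reqbufs(struct example_queue *q, unsigned int count)
{
	mutex_lock(&q->mmap_lock);
	/* ... free any existing buffers, then allocate @count new ones ... */
	q->num_buffers = count;
	mutex_unlock(&q->mmap_lock);
	return 0;
}

static int example_mmap(struct example_queue *q, unsigned long pgoff)
{
	mutex_lock(&q->mmap_lock);
	/* ... look up the buffer addressed by @pgoff and map it ... */
	mutex_unlock(&q->mmap_lock);
	return 0;
}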
79 prefetchw(&mm->mmap_lock); in ia64_do_page_fault()
600 struct mutex mmap_lock; member
101 mmap_lock in read (or write) mode to be sure a huge pmd cannot be
103 takes the mmap_lock in write mode in addition to the anon_vma lock). If
367 task's mm's mmap_lock for read during the query. The set_mempolicy() and
368 mbind() APIs [see below] always acquire the mmap_lock for write when
374 we hold them mmap_lock for read. Again, because replacing the task or vma
375 policy requires that the mmap_lock be held for write, the policy can't be
379 shared memory policy while another task, with a distinct mmap_lock, is
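The mempolicy documentation hits spell out the asymmetry: queries hold mmap_lock for read, while set_mempolicy()/mbind() acquire it for write before replacing a policy. A hedged sketch of the two sides, with hypothetical function names and the policy work elided:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Query side: holding mmap_lock for read keeps per-VMA policies stable. */
static void policy_query_example(struct mm_struct *mm)
{
	mmap_read_lock(mm);
	/* ... walk the VMAs and inspect their (unchanging) policies ... */
	mmap_read_unlock(mm);
}

/* Update side: replacing a task's or a VMA's policy needs the lock for write. */
static void policy_update_example(struct mm_struct *mm)
{
	mmap_write_lock(mm);
	/* ... install the new policy ... */
	mmap_write_unlock(mm);
}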
36 ``userfaultfd`` runtime load never takes the mmap_lock for writing).
663 might_lock_read(&current->mm->mmap_lock); in etnaviv_gem_userptr_get_pages()
987 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
3548 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); in vm_lock_anon_vma()
3578 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); in vm_lock_mapping()
2697 might_lock_read(&current->mm->mmap_lock); in internal_get_user_pages_fast()
5033 might_lock_read(&current->mm->mmap_lock); in __might_fault()
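The might_lock_read() hits (etnaviv, internal_get_user_pages_fast(), __might_fault()) are lockdep annotations: a function whose fast path may never touch mmap_lock still declares up front that it could take it for read, so unsafe callers are flagged even when the fast path wins. A minimal sketch of that pattern, with a hypothetical function name and the pinning work elided:

#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/sched.h>

static long pin_user_memory_example(unsigned long start, int nr_pages)
{
	/* Tell lockdep we may take mmap_lock for read further down. */
	might_lock_read(&current->mm->mmap_lock);

	/*
	 * ... try a lockless fast path first; only if it cannot pin
	 * everything do we fall back to the locked slow path below ...
	 */

	mmap_read_lock(current->mm);
	/* ... fault in and pin the remaining pages under the lock ... */
	mmap_read_unlock(current->mm);

	return nr_pages;
}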
1446 prefetchw(&current->mm->mmap_lock); in DEFINE_IDTENTRY_RAW_ERRORCODE()
615 ops mmap_lock PageLocked(page)