| /Linux-v6.6/include/linux/ |
| D | mmap_lock.h |
|       12   .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
|       65   lockdep_assert_held(&mm->mmap_lock);                   in mmap_assert_locked()
|       66   VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);    in mmap_assert_locked()
|       71   lockdep_assert_held_write(&mm->mmap_lock);             in mmap_assert_write_locked()
|       72   VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);    in mmap_assert_write_locked()
|      102   init_rwsem(&mm->mmap_lock);                            in mmap_init_lock()
|      108   down_write(&mm->mmap_lock);                            in mmap_write_lock()
|      115   down_write_nested(&mm->mmap_lock, subclass);           in mmap_write_lock_nested()
|      124   ret = down_write_killable(&mm->mmap_lock);             in mmap_write_lock_killable()
|      133   up_write(&mm->mmap_lock);                              in mmap_write_unlock()
|      [all …]
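The wrappers listed above are the intended interface to mm->mmap_lock; callers are not supposed to take the rw_semaphore directly. A minimal sketch of how a caller is typically expected to use them (walk_my_vmas() is a hypothetical helper, not from the tree):

#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

static int walk_my_vmas(struct mm_struct *mm)
{
        /* Writers that change the VMA layout take the lock exclusively. */
        if (mmap_write_lock_killable(mm))
                return -EINTR;

        mmap_assert_write_locked(mm);   /* lockdep + VM_BUG_ON_MM, as at lines 71-72 */
        /* ... modify the VMA tree here ... */
        mmap_write_unlock(mm);

        /* Pure readers only need the lock in shared mode. */
        mmap_read_lock(mm);
        mmap_assert_locked(mm);         /* held in either mode, as at lines 65-66 */
        /* ... look up VMAs here ... */
        mmap_read_unlock(mm);

        return 0;
}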
|
| D | mm_types.h | 763 struct rw_semaphore mmap_lock; member
|
| /Linux-v6.6/include/trace/events/ |
| D | mmap_lock.h |
|        3   #define TRACE_SYSTEM mmap_lock
|       16   DECLARE_EVENT_CLASS(mmap_lock,
|       43   DEFINE_EVENT_FN(mmap_lock, name, \
|
| /Linux-v6.6/drivers/media/common/videobuf2/ |
| D | videobuf2-core.c |
|      509   lockdep_assert_held(&q->mmap_lock);     in __vb2_queue_free()
|      773   mutex_lock(&q->mmap_lock);              in vb2_core_reqbufs()
|      785   mutex_unlock(&q->mmap_lock);            in vb2_core_reqbufs()
|      806   mutex_lock(&q->mmap_lock);              in vb2_core_reqbufs()
|      808   mutex_unlock(&q->mmap_lock);            in vb2_core_reqbufs()
|      873   mutex_lock(&q->mmap_lock);              in vb2_core_reqbufs()
|      883   mutex_unlock(&q->mmap_lock);            in vb2_core_reqbufs()
|      886   mutex_unlock(&q->mmap_lock);            in vb2_core_reqbufs()
|      898   mutex_lock(&q->mmap_lock);              in vb2_core_reqbufs()
|      900   mutex_unlock(&q->mmap_lock);            in vb2_core_reqbufs()
|      [all …]
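The videobuf2 hits show vb2_core_reqbufs() taking the queue's mmap_lock mutex around every buffer (re)allocation, with __vb2_queue_free() asserting that the mutex is held. A rough sketch of that pattern under the same assumption; my_requeue_buffers() and my_free_buffer_range() are hypothetical helpers, not vb2 functions:

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <media/videobuf2-core.h>

static void my_free_buffer_range(struct vb2_queue *q, unsigned int count)
{
        /* Mirrors the assertion in __vb2_queue_free(): caller holds the mutex. */
        lockdep_assert_held(&q->mmap_lock);
        /* ... release buffer memory that userspace could still try to mmap() ... */
}

static int my_requeue_buffers(struct vb2_queue *q, unsigned int count)
{
        mutex_lock(&q->mmap_lock);
        my_free_buffer_range(q, count);
        /* ... allocate and register the new buffer set ... */
        mutex_unlock(&q->mmap_lock);
        return 0;
}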
|
| /Linux-v6.6/tools/perf/util/bpf_skel/ |
| D | lock_contention.bpf.c |
|      108   struct rw_semaphore mmap_lock;                      member
|      249   if (bpf_core_field_exists(mm_new->mmap_lock)) {     in check_lock_type()
|      250   if (&mm_new->mmap_lock == (void *)lock)             in check_lock_type()
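perf's lock-contention BPF skeleton uses a CO-RE field-existence check before comparing the contended lock address against &mm->mmap_lock, presumably so the program still loads on older kernels where the member was named mmap_sem. A hedged sketch of that pattern; the local struct tag mm_struct___new mirrors the one implied above, and lock_is_mmap_lock() is illustrative only:

#include "vmlinux.h"
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

/* Minimal local definition: only the member we need, CO-RE relocatable. */
struct mm_struct___new {
        struct rw_semaphore mmap_lock;
} __attribute__((preserve_access_index));

static __always_inline int lock_is_mmap_lock(struct mm_struct *mm, void *lock)
{
        struct mm_struct___new *mm_new = (void *)mm;

        /* The field only exists on kernels new enough to call it mmap_lock. */
        if (bpf_core_field_exists(mm_new->mmap_lock))
                return (void *)&mm_new->mmap_lock == lock;
        return 0;
}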
|
| /Linux-v6.6/kernel/bpf/ |
| D | mmap_unlock_work.h | 60 rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); in bpf_mmap_unlock_mm()
|
| /Linux-v6.6/mm/ |
| D | init-mm.c | 33 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
|
| D | Makefile | 56 debug.o gup.o mmap_lock.o $(mmu-y)
|
| D | mmu_notifier.c | 995 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
|
| /Linux-v6.6/drivers/infiniband/hw/cxgb4/ |
| D | iw_cxgb4.h |
|      525   spinlock_t mmap_lock;                   member
|      548   spin_lock(&ucontext->mmap_lock);        in remove_mmap()
|      554   spin_unlock(&ucontext->mmap_lock);      in remove_mmap()
|      560   spin_unlock(&ucontext->mmap_lock);      in remove_mmap()
|      567   spin_lock(&ucontext->mmap_lock);        in insert_mmap()
|      571   spin_unlock(&ucontext->mmap_lock);      in insert_mmap()
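In the cxgb4 driver the mmap_lock above is not the mm lock at all but a per-ucontext spinlock guarding the list of pending mmap entries and the key counter handed out to userspace. A simplified sketch of that pattern (the struct layout and helper below are illustrative, not the driver's actual definitions):

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_mmap_entry {
        struct list_head entry;
        u32 key;                /* offset later matched in the driver's mmap() */
        u64 addr;
        unsigned int len;
};

struct my_ucontext {
        struct list_head mmaps;
        spinlock_t mmap_lock;   /* guards mmaps list and key counter */
        u32 key;
};

static void my_insert_mmap(struct my_ucontext *uctx, struct my_mmap_entry *mm)
{
        spin_lock(&uctx->mmap_lock);
        /* Hand out a fresh, page-aligned key while nobody can race with us. */
        mm->key = uctx->key;
        uctx->key += PAGE_SIZE;
        list_add_tail(&mm->entry, &uctx->mmaps);
        spin_unlock(&uctx->mmap_lock);
}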
|
| D | provider.c |
|       89   spin_lock_init(&context->mmap_lock);    in c4iw_alloc_ucontext()
|      103   spin_lock(&context->mmap_lock);         in c4iw_alloc_ucontext()
|      106   spin_unlock(&context->mmap_lock);       in c4iw_alloc_ucontext()
|
| D | cq.c |
|     1109   spin_lock(&ucontext->mmap_lock);        in c4iw_create_cq()
|     1119   spin_unlock(&ucontext->mmap_lock);      in c4iw_create_cq()
|
| D | qp.c |
|     2262   spin_lock(&ucontext->mmap_lock);        in c4iw_create_qp()
|     2279   spin_unlock(&ucontext->mmap_lock);      in c4iw_create_qp()
|     2754   spin_lock(&ucontext->mmap_lock);        in c4iw_create_srq()
|     2759   spin_unlock(&ucontext->mmap_lock);      in c4iw_create_srq()
|
| /Linux-v6.6/tools/perf/util/bpf_skel/vmlinux/ |
| D | vmlinux.h | 89 struct rw_semaphore mmap_lock; member
|
| /Linux-v6.6/arch/ia64/mm/ |
| D | fault.c | 79 prefetchw(&mm->mmap_lock); in ia64_do_page_fault()
|
| /Linux-v6.6/Documentation/mm/ |
| D | transhuge.rst |
|       94   mmap_lock in read (or write) mode to be sure a huge pmd cannot be
|       96   takes the mmap_lock in write mode in addition to the anon_vma lock). If
|
| /Linux-v6.6/include/media/ |
| D | videobuf2-core.h | 619 struct mutex mmap_lock; member
|
| /Linux-v6.6/Documentation/admin-guide/mm/ |
| D | numa_memory_policy.rst |
|      372   task's mm's mmap_lock for read during the query. The set_mempolicy() and
|      373   mbind() APIs [see below] always acquire the mmap_lock for write when
|      379   we hold them mmap_lock for read. Again, because replacing the task or vma
|      380   policy requires that the mmap_lock be held for write, the policy can't be
|      384   shared memory policy while another task, with a distinct mmap_lock, is
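A small sketch of the read/write asymmetry the excerpt describes, expressed with the mmap_lock.h wrappers; query_vma_policy() and replace_vma_policy() are hypothetical stand-ins for the mempolicy internals, not kernel functions:

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

static void query_vma_policy(struct mm_struct *mm, unsigned long addr)
{
        mmap_read_lock(mm);             /* get_mempolicy()-style lookup */
        /* ... find the VMA covering addr and read its policy ... */
        mmap_read_unlock(mm);
}

static void replace_vma_policy(struct mm_struct *mm, unsigned long start,
                               unsigned long end)
{
        mmap_write_lock(mm);            /* set_mempolicy()/mbind()-style update */
        /* ... install the new policy; readers like the one above are excluded ... */
        mmap_write_unlock(mm);
}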
|
| D | multigen_lru.rst | 41 theoretically worsen lock contention (mmap_lock). If it is
|
| /Linux-v6.6/arch/x86/kernel/ |
| D | tboot.c | 98 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock),
|
| /Linux-v6.6/Documentation/kernel-hacking/ |
| D | false-sharing.rst | 48 false sharing. One of these is a rw_semaphore 'mmap_lock' inside
|
| /Linux-v6.6/drivers/gpu/drm/etnaviv/ |
| D | etnaviv_gem.c | 648 might_lock_read(¤t->mm->mmap_lock); in etnaviv_gem_userptr_get_pages()
|
| /Linux-v6.6/arch/x86/mm/ |
| D | fault.c | 1522 prefetchw(¤t->mm->mmap_lock); in DEFINE_IDTENTRY_RAW_ERRORCODE()
|
| /Linux-v6.6/drivers/firmware/efi/ |
| D | efi.c | 67 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
|
| /Linux-v6.6/Documentation/filesystems/ |
| D | porting.rst |
|      948   mmap_lock held. All in-tree users have been audited and do not seem to
|      949   depend on the mmap_lock being held, but out of tree users should verify
|      951   be called with the mmap_lock held.
|