Searched refs:mm_users (Results 1 – 25 of 40) sorted by relevance
364 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_mm()
396 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_range()
439 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page()
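The flush_tlb_*() hits above (and the similar ones further down) all encode the same single-user shortcut: when the task running on this CPU is the only user of the mm, no other CPU can be caching translations for it, so the cross-CPU shootdown can be skipped. A minimal sketch of the idea, where smp_broadcast_flush() is a hypothetical stand-in for each architecture's real IPI path:

	/* Only one real user, and it is running here: no other CPU can
	 * hold stale TLB entries for this mm, so flush locally. */
	if (atomic_read(&mm->mm_users) == 1 && current->mm == mm)
		local_flush_tlb_mm(mm);		/* this CPU only */
	else
		smp_broadcast_flush(mm);	/* hypothetical cross-CPU IPI path */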
95 atomic_inc(&mm->mm_users); in mmget()
100 return atomic_inc_not_zero(&mm->mm_users); in mmget_not_zero()
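mmget() and mmget_not_zero() are the helpers most callers go through instead of touching mm_users directly; the _not_zero variant refuses to resurrect an address space whose last user is already gone. A sketch of the usual pin/unpin pattern around a foreign mm (walk_remote_mm() is a made-up name for illustration):

	static int walk_remote_mm(struct mm_struct *mm)
	{
		if (!mmget_not_zero(mm))	/* address space already torn down? */
			return -ESRCH;
		/* ... mm_users held: VMAs and page tables stay alive here ... */
		mmput(mm);			/* drop our mm_users reference */
		return 0;
	}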
150 if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) in membarrier_private_expedited()
206 if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) { in sync_runqueues_membarrier_state()
32 .mm_users = ATOMIC_INIT(2),
255 BUG_ON(atomic_read(&mm->mm_users) <= 0); in __mmu_notifier_register()
305 BUG_ON(atomic_read(&mm->mm_users) <= 0); in __mmu_notifier_register()
169 mm->pgd, atomic_read(&mm->mm_users), in dump_mm()
65 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
833 if (atomic_read(&mm->mm_users) <= 1) in task_will_free_mem()
25 if (atomic_read(&mm->mm_users) == 1) in current_is_single_threaded()
25 BUG_ON(atomic_read(&mm->mm_users) != 1); in init_new_context()
538 } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_mm()
588 } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_range()
655 } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page()
85 if (atomic_read(&mm->mm_users) == 0) in flush_tlb_mm()
654 if (atomic_read(&mm->mm_users) <= 1) { in flush_tlb_mm()
701 if (atomic_read(&mm->mm_users) <= 1) { in flush_tlb_page()
755 if (atomic_read(&mm->mm_users) <= 1) { in flush_icache_user_range()
58 "mm_users" counter that is how many "real address space users" there are,67 released because "mm_users" becomes zero.
298 if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) in smp_flush_tlb_mm()
112 DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users)); in main()
528 if (atomic_read(&current->mm->mm_users) == 0) in flush_tlb_all()
556 if (atomic_read(&mm->mm_users) == 0) in fix_range()
1085 if (atomic_read(&mm->mm_users) == 1) { in smp_flush_tlb_mm()
1123 if (mm == current->mm && atomic_read(&mm->mm_users) == 1) in smp_flush_tlb_pending()
1139 if (mm == current->mm && atomic_read(&mm->mm_users) == 1) in smp_flush_tlb_page()
410 atomic_t mm_users; member
203 if (!atomic_inc_not_zero(&ctx->mm->mm_users)) in get_mem_context()
92 .mm_users = ATOMIC_INIT(2),
1009 atomic_set(&mm->mm_users, 1); in mm_init()
1073 VM_BUG_ON(atomic_read(&mm->mm_users)); in __mmput()
1099 if (atomic_dec_and_test(&mm->mm_users)) in mmput()
1115 if (atomic_dec_and_test(&mm->mm_users)) { in mmput_async()
1316 atomic_read(&mm->mm_users) > 1) { in mm_release()
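Taken together, these hits show the full lifecycle: mm_init() starts the counter at one for the creator, every extra user pairs mmget() with mmput(), and the final decrement funnels into __mmput(), which tears the mappings down and asserts the counter really is zero. An illustrative trace under those assumptions (not the real fork.c code):

	atomic_set(&mm->mm_users, 1);	/* mm_init(): creator's reference */
	mmget(mm);			/* a second user appears */
	mmput(mm);			/* second user done: 2 -> 1 */
	mmput(mm);			/* creator done: 1 -> 0, __mmput() runs */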
272 if (atomic_read(&tlb->mm->mm_users) < 2 || in hugepd_free()
301 if (atomic_read(&mm->mm_users) == 0) in local_flush_tlb_mm()
379 if (atomic_read(&mm->mm_users) == nr + 1) in zap_threads()