
Searched refs:locked_vm (Results 1 – 22 of 22) sorted by relevance

/Linux-v4.19/drivers/fpga/
dfl-afu-dma-region.c
57 locked = current->mm->locked_vm + npages; in afu_dma_adjust_locked_vm()
63 current->mm->locked_vm += npages; in afu_dma_adjust_locked_vm()
65 if (WARN_ON_ONCE(npages > current->mm->locked_vm)) in afu_dma_adjust_locked_vm()
66 npages = current->mm->locked_vm; in afu_dma_adjust_locked_vm()
67 current->mm->locked_vm -= npages; in afu_dma_adjust_locked_vm()
72 current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), in afu_dma_adjust_locked_vm()
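The fpga and powerpc hits above (and the spapr-tce ones further down) open-code the same per-mm accounting: take mmap_sem for writing, add the pinned page count to mm->locked_vm, refuse the increment if it would push the task past RLIMIT_MEMLOCK, and clamp with WARN_ON_ONCE on the decrement path. A minimal sketch of that pattern, with a hypothetical helper name and the CAP_IPC_LOCK bypass these callers typically include:

    /* Sketch only: mirrors the per-mm accounting pattern in the hits above.
     * The helper name is hypothetical; each of these drivers carries its own copy. */
    #include <linux/mm.h>             /* struct mm_struct, PAGE_SHIFT */
    #include <linux/sched/signal.h>   /* rlimit() */
    #include <linux/capability.h>     /* capable() */

    static int adjust_locked_vm(struct mm_struct *mm, unsigned long npages, bool incr)
    {
            unsigned long locked, lock_limit;
            int ret = 0;

            if (!npages)
                    return 0;

            down_write(&mm->mmap_sem);        /* mmap_sem in v4.19 (mmap_lock in later kernels) */
            if (incr) {
                    locked = mm->locked_vm + npages;
                    lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                    if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                            ret = -ENOMEM;    /* increment would exceed the limit */
                    else
                            mm->locked_vm += npages;
            } else {
                    if (WARN_ON_ONCE(npages > mm->locked_vm))
                            npages = mm->locked_vm;   /* clamp an unbalanced decrement */
                    mm->locked_vm -= npages;
            }
            up_write(&mm->mmap_sem);
            return ret;
    }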
/Linux-v4.19/arch/powerpc/mm/
mmu_context_iommu.c
52 locked = mm->locked_vm + npages; in mm_iommu_adjust_locked_vm()
57 mm->locked_vm += npages; in mm_iommu_adjust_locked_vm()
59 if (WARN_ON_ONCE(npages > mm->locked_vm)) in mm_iommu_adjust_locked_vm()
60 npages = mm->locked_vm; in mm_iommu_adjust_locked_vm()
61 mm->locked_vm -= npages; in mm_iommu_adjust_locked_vm()
68 mm->locked_vm << PAGE_SHIFT, in mm_iommu_adjust_locked_vm()
/Linux-v4.19/arch/powerpc/kvm/
book3s_64_vio.c
71 locked = current->mm->locked_vm + stt_pages; in kvmppc_account_memlimit()
76 current->mm->locked_vm += stt_pages; in kvmppc_account_memlimit()
78 if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm)) in kvmppc_account_memlimit()
79 stt_pages = current->mm->locked_vm; in kvmppc_account_memlimit()
81 current->mm->locked_vm -= stt_pages; in kvmppc_account_memlimit()
87 current->mm->locked_vm << PAGE_SHIFT, in kvmppc_account_memlimit()
/Linux-v4.19/net/xdp/
xdp_umem.c
148 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
259 old_npgs = atomic_long_read(&umem->user->locked_vm); in xdp_umem_account_pages()
266 } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs, in xdp_umem_account_pages()
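net/xdp charges against the per-user counter rather than the per-mm one: user_struct::locked_vm is an atomic_long_t (see the user.h hit just below), so no lock is taken and the charge is a read/check/cmpxchg loop. A rough sketch of that loop, assuming the caller already holds a reference on 'user' and is charging npgs pages; the function name is illustrative:

    /* Sketch of the lock-free per-user accounting used by xdp_umem.c. */
    #include <linux/mm.h>             /* PAGE_SHIFT */
    #include <linux/sched/user.h>     /* struct user_struct */
    #include <linux/sched/signal.h>   /* rlimit() */

    static int account_user_pages(struct user_struct *user, unsigned long npgs)
    {
            unsigned long lock_limit, old_npgs, new_npgs;

            lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

            do {
                    old_npgs = atomic_long_read(&user->locked_vm);
                    new_npgs = old_npgs + npgs;
                    if (new_npgs > lock_limit)
                            return -ENOMEM;   /* over RLIMIT_MEMLOCK (exact errno varies by subsystem) */
            } while (atomic_long_cmpxchg(&user->locked_vm, old_npgs,
                                         new_npgs) != old_npgs);

            return 0;
    }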
/Linux-v4.19/include/linux/sched/
user.h
44 atomic_long_t locked_vm; (member)
/Linux-v4.19/drivers/vfio/
vfio_iommu_spapr_tce.c
48 locked = mm->locked_vm + npages; in try_increment_locked_vm()
53 mm->locked_vm += npages; in try_increment_locked_vm()
57 mm->locked_vm << PAGE_SHIFT, in try_increment_locked_vm()
72 if (WARN_ON_ONCE(npages > mm->locked_vm)) in decrement_locked_vm()
73 npages = mm->locked_vm; in decrement_locked_vm()
74 mm->locked_vm -= npages; in decrement_locked_vm()
77 mm->locked_vm << PAGE_SHIFT, in decrement_locked_vm()
vfio_iommu_type1.c
278 if (mm->locked_vm + npage > limit) in vfio_lock_acct()
284 mm->locked_vm += npage; in vfio_lock_acct()
421 if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) { in vfio_pin_pages_remote()
448 current->mm->locked_vm + lock_acct + 1 > limit) { in vfio_pin_pages_remote()
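The type1 hits also show the usual escape hatch: the RLIMIT_MEMLOCK check is skipped when the task had CAP_IPC_LOCK at mapping time, cached here as dma->lock_cap. Reduced to its core, the check looks roughly like the following; the helper name and signature are illustrative, not the driver's:

    /* Sketch of the limit check behind vfio_lock_acct()/vfio_pin_pages_remote().
     * 'lock_cap' stands in for the cached capable(CAP_IPC_LOCK) result. */
    #include <linux/mm.h>             /* struct mm_struct, PAGE_SHIFT */
    #include <linux/sched/signal.h>   /* rlimit() */

    static int check_memlock_limit(struct mm_struct *mm, long npage, bool lock_cap)
    {
            unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

            if (!lock_cap && mm->locked_vm + npage > limit)
                    return -ENOMEM;          /* would exceed RLIMIT_MEMLOCK */

            mm->locked_vm += npage;          /* caller serializes on mmap_sem */
            return 0;
    }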
/Linux-v4.19/Documentation/infiniband/
user_verbs.txt
49 amount of memory pinned in the process's locked_vm, and checks that
53 pinned, so the value of locked_vm may be an overestimate of the
/Linux-v4.19/mm/
mremap.c
363 mm->locked_vm += new_len >> PAGE_SHIFT; in move_vma()
414 locked = mm->locked_vm << PAGE_SHIFT; in vma_to_resize()
594 mm->locked_vm += pages; in SYSCALL_DEFINE5()
debug.c
154 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, in dump_mm()
mmap.c
1318 locked += mm->locked_vm; in mlock_future_check()
1804 mm->locked_vm += (len >> PAGE_SHIFT); in mmap_region()
2270 locked = mm->locked_vm + grow; in acct_stack_growth()
2364 mm->locked_vm += grow; in expand_upwards()
2445 mm->locked_vm += grow; in expand_downwards()
2766 if (mm->locked_vm) { in do_munmap()
2770 mm->locked_vm -= vma_pages(tmp); in do_munmap()
3001 mm->locked_vm += (len >> PAGE_SHIFT); in do_brk_flags()
3073 if (mm->locked_vm) { in exit_mmap()
mlock.c
565 mm->locked_vm += nr_pages; in mlock_fixup()
690 locked += current->mm->locked_vm; in do_mlock()
/Linux-v4.19/kernel/bpf/
syscall.c
178 cur = atomic_long_read(&user->locked_vm); in bpf_map_precharge_memlock()
189 if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) { in bpf_charge_memlock()
190 atomic_long_sub(pages, &user->locked_vm); in bpf_charge_memlock()
198 atomic_long_sub(pages, &user->locked_vm); in bpf_uncharge_memlock()
1009 user_bufs = atomic_long_add_return(pages, &user->locked_vm); in __bpf_prog_charge()
1011 atomic_long_sub(pages, &user->locked_vm); in __bpf_prog_charge()
1022 atomic_long_sub(pages, &user->locked_vm); in __bpf_prog_uncharge()
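The BPF syscall code uses the same per-user counter but charges optimistically: atomic_long_add_return() applies the charge first, and the caller rolls it back if the result overshoots the limit. A sketch of that charge/uncharge pair, with hypothetical stand-in names for the bpf_*_memlock helpers matched above:

    /* Sketch of the optimistic charge/rollback pattern in kernel/bpf/syscall.c. */
    #include <linux/mm.h>             /* PAGE_SHIFT */
    #include <linux/sched/user.h>     /* struct user_struct */
    #include <linux/sched/signal.h>   /* rlimit() */

    static int charge_memlock(struct user_struct *user, unsigned long pages)
    {
            unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

            if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
                    atomic_long_sub(pages, &user->locked_vm);  /* roll the charge back */
                    return -EPERM;
            }
            return 0;
    }

    static void uncharge_memlock(struct user_struct *user, unsigned long pages)
    {
            atomic_long_sub(pages, &user->locked_vm);
    }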
/Linux-v4.19/include/linux/
mm_types.h
402 unsigned long locked_vm; /* Pages that have PG_mlocked set */ (member)
/Linux-v4.19/Documentation/vm/
unevictable-lru.rst
358 VMAs against the task's "locked_vm".
487 to be mlocked to the task's "locked_vm". To account for filtered VMAs,
489 callers then subtract a non-negative return value from the task's locked_vm. A
492 memory range accounted as locked_vm, as the protections could be changed later
/Linux-v4.19/drivers/infiniband/hw/usnic/
usnic_uiom.c
63 umem->mm->locked_vm -= umem->diff; in usnic_uiom_reg_account()
/Linux-v4.19/net/core/
skbuff.c
908 old_pg = atomic_long_read(&user->locked_vm); in mm_account_pinned_pages()
912 } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) != in mm_account_pinned_pages()
929 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); in mm_unaccount_pinned_pages()
/Linux-v4.19/Documentation/
vfio.txt
457 mm::locked_vm counter to make sure we do not exceed the rlimit.
/Linux-v4.19/fs/proc/
task_mmu.c
61 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); in task_mem()
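fs/proc/task_mmu.c is where mm->locked_vm becomes user-visible: it is reported as the VmLck field of /proc/<pid>/status. A small userspace check (plain C, not kernel code) that mlocks a few pages and reads the field back:

    /* Userspace illustration: mlock() some pages, then watch mm->locked_vm
     * show up as VmLck in /proc/self/status. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 8 * 4096;
            void *buf = malloc(len);
            char line[256];
            FILE *f;

            if (!buf || mlock(buf, len) != 0) {   /* counts toward RLIMIT_MEMLOCK */
                    perror("mlock");
                    return 1;
            }

            f = fopen("/proc/self/status", "r");
            while (f && fgets(line, sizeof(line), f))
                    if (strncmp(line, "VmLck:", 6) == 0)
                            fputs(line, stdout);  /* e.g. "VmLck: 36 kB" (rounded to whole pages) */
            if (f)
                    fclose(f);

            munlock(buf, len);
            free(buf);
            return 0;
    }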
/Linux-v4.19/kernel/
fork.c
937 mm->locked_vm = 0; in mm_init()
/Linux-v4.19/kernel/events/
core.c
5461 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); in perf_mmap_close()
5534 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); in perf_mmap_close()
5676 user_locked = atomic_long_read(&user->locked_vm) + user_extra; in perf_mmap()
5723 atomic_long_add(user_extra, &user->locked_vm); in perf_mmap()
/Linux-v4.19/Documentation/virtual/uml/
UserModeLinux-HOWTO.txt
3323 total_vm = 0, locked_vm = 0, def_flags = 0, cpu_vm_mask = 0, swap_cnt = 0,