/Linux-v4.19/mm/

D | vmacache.c
      39  current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;      in vmacache_update()
      73  struct vm_area_struct *vma = current->vmacache.vmas[idx];  in vmacache_find()
     106  struct vm_area_struct *vma = current->vmacache.vmas[idx];  in vmacache_find_exact()
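These hits are the per-task VMA cache: each task keeps the last few looked-up VMAs in vmacache.vmas[], indexed by a hash of the address. The following is a simplified sketch of that update/probe pattern, not the verbatim v4.19 code; the real vmacache_find() additionally validates mm ownership and a sequence number, and the exact header that provides the VMACACHE_* macros is glossed over here.

/* Sketch only: simplified from the vmacache_update()/vmacache_find()
 * hit pattern above; cache validation (seqnum, vm_mm checks) is
 * deliberately omitted.  VMACACHE_HASH/VMACACHE_SIZE come from the
 * kernel's vmacache definitions. */
#include <linux/mm.h>
#include <linux/sched.h>

static void vmacache_update_sketch(unsigned long addr,
				   struct vm_area_struct *newvma)
{
	/* Drop the freshly looked-up VMA into the hashed slot. */
	current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}

static struct vm_area_struct *vmacache_find_sketch(unsigned long addr)
{
	int idx = VMACACHE_HASH(addr);
	int i;

	/* Probe every slot, starting from the hashed one and wrapping. */
	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma && vma->vm_start <= addr && vma->vm_end > addr)
			return vma;
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}
	return NULL;	/* miss: the caller falls back to the rbtree walk */
}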
D | gup.c
     660  struct vm_area_struct **vmas, int *nonblocking)  in __get_user_pages() argument
     701  i = follow_hugetlb_page(mm, vma, pages, vmas,  in __get_user_pages()
     749  if (vmas) {  in __get_user_pages()
     750  vmas[i] = vma;  in __get_user_pages()
     869  struct vm_area_struct **vmas,  in __get_user_pages_locked() argument
     878  BUG_ON(vmas);  in __get_user_pages_locked()
     890  vmas, locked);  in __get_user_pages_locked()
    1079  struct vm_area_struct **vmas, int *locked)  in get_user_pages_remote() argument
    1081  return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,  in get_user_pages_remote()
    1096  struct vm_area_struct **vmas)  in get_user_pages() argument
    [all …]
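These are the core GUP paths; the optional vmas[] output array is filled alongside pages[] so the caller can see which mapping backs each pinned page. A hypothetical caller of the v4.19 get_user_pages() signature (see the mm.h entry below) could look like the sketch that follows; gup_one_page() and uaddr are made-up names, and in this kernel version mmap_sem must be held for read across the call.

/* Hypothetical caller sketch for the v4.19 get_user_pages() API;
 * gup_one_page() is not a real kernel function. */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/printk.h>

static int gup_one_page(unsigned long uaddr)
{
	struct page *page;
	struct vm_area_struct *vma;
	long ret;

	down_read(&current->mm->mmap_sem);
	/* Pin one page for write access and also report its backing VMA. */
	ret = get_user_pages(uaddr, 1, FOLL_WRITE, &page, &vma);
	if (ret == 1)
		/* vma may only be dereferenced while mmap_sem is held */
		pr_debug("pinned page in vma [%lx, %lx)\n",
			 vma->vm_start, vma->vm_end);
	up_read(&current->mm->mmap_sem);

	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	put_page(page);	/* undo the pin when done */
	return 0;
}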
D | nommu.c
     116  struct vm_area_struct **vmas, int *nonblocking)  in __get_user_pages() argument
     145  if (vmas)  in __get_user_pages()
     146  vmas[i] = vma;  in __get_user_pages()
     165  struct vm_area_struct **vmas)  in get_user_pages() argument
     168  gup_flags, pages, vmas, NULL);  in get_user_pages()
     729  if (curr->vmacache.vmas[i] == vma) {  in delete_vma_from_mm()

D | hugetlb.c
    4161  struct page **pages, struct vm_area_struct **vmas,  in follow_hugetlb_page() argument
    4276  if (vmas)  in follow_hugetlb_page()
    4277  vmas[i] = vma;  in follow_hugetlb_page()
/Linux-v4.19/include/linux/

D | vmacache.h
      10  memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));  in vmacache_flush()

D | mm_types_task.h
      36  struct vm_area_struct *vmas[VMACACHE_SIZE];  member

D | mm.h
    1453  struct vm_area_struct **vmas, int *locked);
    1456  struct vm_area_struct **vmas);
    1464  struct vm_area_struct **vmas);
    1468  struct page **pages, struct vm_area_struct **vmas)  in get_user_pages_longterm() argument
    1470  return get_user_pages(start, nr_pages, gup_flags, pages, vmas);  in get_user_pages_longterm()
/Linux-v4.19/drivers/gpu/drm/i915/selftests/

D | mock_gem_device.c
      84  kmem_cache_destroy(i915->vmas);  in mock_device_release()
     205  i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);  in mock_gem_device()
     206  if (!i915->vmas)  in mock_gem_device()
     259  kmem_cache_destroy(i915->vmas);  in mock_gem_device()
/Linux-v4.19/drivers/gpu/drm/msm/

D | msm_gem.h
      83  struct list_head vmas;  /* list of msm_gem_vma */  member

D | msm_gem.c
     315  list_add_tail(&vma->list, &msm_obj->vmas);  in add_vma()
     328  list_for_each_entry(vma, &msm_obj->vmas, list) {  in lookup_vma()
     354  list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {  in put_iova()
     748  list_for_each_entry(vma, &msm_obj->vmas, list)  in msm_gem_describe()
     889  INIT_LIST_HEAD(&msm_obj->vmas);  in msm_gem_new_impl()
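The msm_gem.c hits are the standard <linux/list.h> idiom for the per-object vmas list declared in msm_gem.h above: initialise the head, append with list_add_tail(), walk with list_for_each_entry(), and use the _safe variant when entries are removed during the walk. Below is a generic, self-contained sketch of that pattern; obj and obj_vma are illustrative stand-ins, not the driver's real msm_gem structures.

/* Generic sketch of the list idiom used above; struct and function
 * names are simplified stand-ins, not the actual msm_gem code. */
#include <linux/list.h>
#include <linux/slab.h>

struct obj_vma {			/* stands in for msm_gem_vma */
	struct list_head list;		/* node on the object's vmas list */
	unsigned long iova;
};

struct obj {				/* stands in for msm_gem_object */
	struct list_head vmas;		/* list of obj_vma */
};

static void obj_init(struct obj *o)
{
	INIT_LIST_HEAD(&o->vmas);	/* as in msm_gem_new_impl() */
}

static struct obj_vma *obj_add_vma(struct obj *o, unsigned long iova)
{
	struct obj_vma *v = kzalloc(sizeof(*v), GFP_KERNEL);

	if (!v)
		return NULL;
	v->iova = iova;
	list_add_tail(&v->list, &o->vmas);	/* same pattern as add_vma() */
	return v;
}

static struct obj_vma *obj_lookup_vma(struct obj *o, unsigned long iova)
{
	struct obj_vma *v;

	/* same traversal pattern as lookup_vma() */
	list_for_each_entry(v, &o->vmas, list)
		if (v->iova == iova)
			return v;
	return NULL;
}

static void obj_put_vmas(struct obj *o)
{
	struct obj_vma *v, *tmp;

	/* _safe variant because entries are freed while iterating,
	 * as in put_iova() */
	list_for_each_entry_safe(v, tmp, &o->vmas, list) {
		list_del(&v->list);
		kfree(v);
	}
}

The _safe iterator matters in obj_put_vmas(): the plain list_for_each_entry() would dereference a node after it has been freed.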
/Linux-v4.19/drivers/video/fbdev/vermilion/

D | vermilion.h
     224  atomic_t vmas;  member

/Linux-v4.19/drivers/gpu/drm/v3d/

D | v3d_drv.h
     130  struct list_head vmas;  /* list of v3d_vma */  member

D | v3d_bo.c
     120  INIT_LIST_HEAD(&bo->vmas);  in v3d_bo_create_struct()

/Linux-v4.19/kernel/debug/

D | debug_core.c
     236  if (!current->vmacache.vmas[i])  in kgdb_flush_swbreak_addr()
     238  flush_cache_range(current->vmacache.vmas[i],  in kgdb_flush_swbreak_addr()
/Linux-v4.19/drivers/gpu/drm/i915/

D | i915_vma.c
     139  vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);  in vma_create()
     224  kmem_cache_free(vm->i915->vmas, vma);  in vma_create()
     814  kmem_cache_free(i915->vmas, vma);  in __i915_vma_destroy()

D | i915_gem.c
    3476  kmem_cache_shrink(i915->vmas);  in shrink_caches()
    5705  dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);  in i915_gem_init_early()
    5706  if (!dev_priv->vmas)  in i915_gem_init_early()
    5760  kmem_cache_destroy(dev_priv->vmas);  in i915_gem_init_early()
    5779  kmem_cache_destroy(dev_priv->vmas);  in i915_gem_cleanup_early()
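Taken together, the i915 hits show a dedicated slab cache for VMA objects over its whole lifetime: created with KMEM_CACHE() in i915_gem_init_early(), allocated from with kmem_cache_zalloc(), released with kmem_cache_free(), trimmed with kmem_cache_shrink(), and torn down with kmem_cache_destroy(). A minimal stand-alone sketch of that slab-cache pattern follows; my_vma and the helper names are illustrative, not the driver's.

/* Stand-alone sketch of the dedicated slab-cache pattern visible in the
 * i915 hits; type and function names here are illustrative. */
#include <linux/slab.h>
#include <linux/errno.h>

struct my_vma {
	unsigned long start;
	unsigned long size;
};

static struct kmem_cache *vma_cache;	/* plays the role of i915->vmas */

static int vma_cache_init(void)
{
	/* KMEM_CACHE() derives the cache name and size from the type. */
	vma_cache = KMEM_CACHE(my_vma, SLAB_HWCACHE_ALIGN);
	return vma_cache ? 0 : -ENOMEM;
}

static struct my_vma *vma_alloc(void)
{
	/* Zeroed allocation from the dedicated cache. */
	return kmem_cache_zalloc(vma_cache, GFP_KERNEL);
}

static void vma_free(struct my_vma *vma)
{
	kmem_cache_free(vma_cache, vma);
}

static void vma_cache_shrink(void)
{
	/* Return unused slab pages to the allocator under memory
	 * pressure, as shrink_caches() does above. */
	kmem_cache_shrink(vma_cache);
}

static void vma_cache_exit(void)
{
	kmem_cache_destroy(vma_cache);
}

A per-type cache keeps these small, frequently allocated objects densely packed and hardware-cache-aligned, which is the usual reason a driver prefers it over plain kmalloc() here.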
D | i915_drv.h
    1558  struct kmem_cache *vmas;  member

D | i915_gem_gtt.c
    2105  vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);  in pd_vma_create()
/Linux-v4.19/drivers/vfio/

D | vfio_iommu_type1.c
     345  struct vm_area_struct *vmas[1];  in vaddr_get_pfn()  local
     354  ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);  in vaddr_get_pfn()
     357  vmas, NULL);  in vaddr_get_pfn()
     365  if (ret > 0 && vma_is_fsdax(vmas[0])) {  in vaddr_get_pfn()
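vfio_iommu_type1.c is a consumer of the vmas[] output array: vaddr_get_pfn() pins a single user page and uses the returned VMA to refuse long-term pins of fs-dax mappings. Below is a condensed and slightly rearranged sketch of that call pattern; pin_one_user_page() is a made-up name, and the real function also handles pinning in a foreign mm via get_user_pages_remote().

/* Condensed sketch of the single-page pin seen in vaddr_get_pfn();
 * error handling and the non-current-mm path are omitted. */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>

static int pin_one_user_page(unsigned long vaddr, bool write,
			     unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vmas[1];
	unsigned int flags = write ? FOLL_WRITE : 0;
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
	if (ret > 0 && vma_is_fsdax(vmas[0])) {
		/* long-term pins of fs-dax pages are refused */
		ret = -EOPNOTSUPP;
		put_page(page[0]);
	}
	up_read(&current->mm->mmap_sem);

	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	*pfn = page_to_pfn(page[0]);
	return 0;
}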
/Linux-v4.19/Documentation/

D | robust-futexes.txt
      58  FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether
      69  microsecond on Linux, but with thousands (or tens of thousands) of vmas

/Linux-v4.19/Documentation/admin-guide/mm/

D | userfaultfd.rst
      35  operations never involve heavyweight structures like vmas (in fact the
      40  Terabytes. Too many vmas would be needed for that.