Lines Matching full:mem
72 struct kgd_mem *mem) in kfd_mem_is_attached() argument
76 list_for_each_entry(entry, &mem->attachments, list) in kfd_mem_is_attached()
90 uint64_t mem; in amdgpu_amdkfd_gpuvm_init_mem_limits() local
93 mem = si.freeram - si.freehigh; in amdgpu_amdkfd_gpuvm_init_mem_limits()
94 mem *= si.mem_unit; in amdgpu_amdkfd_gpuvm_init_mem_limits()
97 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4); in amdgpu_amdkfd_gpuvm_init_mem_limits()
98 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); in amdgpu_amdkfd_gpuvm_init_mem_limits()
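The two limit assignments matched above in amdgpu_amdkfd_gpuvm_init_mem_limits() reduce to simple fractions of the usable (non-highmem) system RAM: mem - (mem >> 4) is 15/16 of it, and (mem >> 1) - (mem >> 3) is 3/8 of it. Below is a minimal userspace sketch of that arithmetic; only the shift expressions come from the matched lines, while the helper name and the 16 GiB example value are illustrative assumptions.

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical helper: mirrors the shift arithmetic in the matched
	 * lines.  15/16 of mem for the system limit, 3/8 for the TTM limit. */
	static void compute_kfd_mem_limits(uint64_t mem, uint64_t *system_limit,
					   uint64_t *ttm_limit)
	{
		*system_limit = mem - (mem >> 4);     /* 15/16 of mem */
		*ttm_limit = (mem >> 1) - (mem >> 3); /*  3/8 of mem  */
	}

	int main(void)
	{
		uint64_t sys_limit, ttm_limit;

		/* Example: 16 GiB of usable system memory */
		compute_kfd_mem_limits(16ULL << 30, &sys_limit, &ttm_limit);
		printf("system limit: %llu MiB\n",
		       (unsigned long long)(sys_limit >> 20)); /* 15360 */
		printf("TTM limit:    %llu MiB\n",
		       (unsigned long long)(ttm_limit >> 20)); /*  6144 */
		return 0;
	}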
206 "adev reference can't be null when alloc mem flags vram is set"); in amdgpu_amdkfd_unreserve_mem_limit()
245 * @mem: BO of peer device that is being DMA mapped. Provides parameters
251 struct kgd_mem *mem, struct amdgpu_bo **bo_out) in create_dmamap_sg_bo() argument
256 ret = amdgpu_bo_reserve(mem->bo, false); in create_dmamap_sg_bo()
261 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, align, in create_dmamap_sg_bo()
263 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj); in create_dmamap_sg_bo()
265 amdgpu_bo_unreserve(mem->bo); in create_dmamap_sg_bo()
273 (*bo_out)->parent = amdgpu_bo_ref(mem->bo); in create_dmamap_sg_bo()
404 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) in get_pte_flags() argument
406 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in get_pte_flags()
407 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT; in get_pte_flags()
408 bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED; in get_pte_flags()
414 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) in get_pte_flags()
416 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) in get_pte_flags()
422 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { in get_pte_flags()
455 if (!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) in get_pte_flags()
498 kfd_mem_dmamap_userptr(struct kgd_mem *mem, in kfd_mem_dmamap_userptr() argument
502 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_userptr()
507 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; in kfd_mem_dmamap_userptr()
563 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
588 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem, in kfd_mem_dmamap_sg_bo() argument
601 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP); in kfd_mem_dmamap_sg_bo()
607 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_sg_bo()
609 dma_addr = mem->bo->tbo.sg->sgl->dma_address; in kfd_mem_dmamap_sg_bo()
610 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
613 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); in kfd_mem_dmamap_sg_bo()
619 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
637 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length, in kfd_mem_dmamap_sg_bo()
643 kfd_mem_dmamap_attachment(struct kgd_mem *mem, in kfd_mem_dmamap_attachment() argument
650 return kfd_mem_dmamap_userptr(mem, attachment); in kfd_mem_dmamap_attachment()
654 return kfd_mem_dmamap_sg_bo(mem, attachment); in kfd_mem_dmamap_attachment()
662 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, in kfd_mem_dmaunmap_userptr() argument
666 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_userptr()
697 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
712 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem, in kfd_mem_dmaunmap_sg_bo() argument
729 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_sg_bo()
740 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, in kfd_mem_dmaunmap_attachment() argument
747 kfd_mem_dmaunmap_userptr(mem, attachment); in kfd_mem_dmaunmap_attachment()
753 kfd_mem_dmaunmap_sg_bo(mem, attachment); in kfd_mem_dmaunmap_attachment()
761 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, in kfd_mem_attach_dmabuf() argument
767 if (!mem->dmabuf) { in kfd_mem_attach_dmabuf()
768 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base, in kfd_mem_attach_dmabuf()
769 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_attach_dmabuf()
771 if (IS_ERR(mem->dmabuf)) { in kfd_mem_attach_dmabuf()
772 ret = PTR_ERR(mem->dmabuf); in kfd_mem_attach_dmabuf()
773 mem->dmabuf = NULL; in kfd_mem_attach_dmabuf()
778 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); in kfd_mem_attach_dmabuf()
801 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, in kfd_mem_attach() argument
804 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_attach()
805 unsigned long bo_size = mem->bo->tbo.base.size; in kfd_mem_attach()
806 uint64_t va = mem->va; in kfd_mem_attach()
826 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || in kfd_mem_attach()
827 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || in kfd_mem_attach()
828 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { in kfd_mem_attach()
829 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM) in kfd_mem_attach()
845 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) || in kfd_mem_attach()
846 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) || in kfd_mem_attach()
853 bo[i] = mem->bo; in kfd_mem_attach()
860 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in kfd_mem_attach()
863 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); in kfd_mem_attach()
867 } else if (mem->bo->tbo.type == ttm_bo_type_sg) { in kfd_mem_attach()
868 WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL || in kfd_mem_attach()
869 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP), in kfd_mem_attach()
872 ret = create_dmamap_sg_bo(adev, mem, &bo[i]); in kfd_mem_attach()
876 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT || in kfd_mem_attach()
877 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) { in kfd_mem_attach()
879 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); in kfd_mem_attach()
904 attachment[i]->pte_flags = get_pte_flags(adev, mem); in kfd_mem_attach()
906 list_add(&attachment[i]->list, &mem->attachments); in kfd_mem_attach()
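Taken together, the kfd_mem_attach() hits above show how the per-GPU attachment BO is chosen: the original BO is reused on the owning device, userptr and doorbell/MMIO SG BOs get a freshly created dmamap SG BO, and GTT/VRAM BOs being mapped on a peer device are imported through dma-buf. The sketch below is a hedged userspace condensation of that branch order; the enum, helper, and parameter names are illustrative, not the kernel's types, and the real code checks additional conditions (for example the direct-mapped-RAM and XGMI cases) that are only partly visible in the hits.

	#include <stdbool.h>

	/* Hypothetical decision helper, simplified from the branch order
	 * visible in the kfd_mem_attach() hits above. */
	enum attach_kind {
		ATTACH_REUSE_ORIGINAL_BO,   /* bo[i] = mem->bo          */
		ATTACH_CREATE_DMAMAP_SG_BO, /* create_dmamap_sg_bo()    */
		ATTACH_IMPORT_VIA_DMABUF,   /* kfd_mem_attach_dmabuf()  */
		ATTACH_UNSUPPORTED,
	};

	static enum attach_kind pick_attach_kind(bool same_device, bool mmio_remap,
						 bool is_userptr, bool is_sg_bo,
						 bool is_gtt_or_vram)
	{
		if (same_device && !mmio_remap)
			return ATTACH_REUSE_ORIGINAL_BO;
		if (is_userptr || is_sg_bo)
			return ATTACH_CREATE_DMAMAP_SG_BO;
		if (is_gtt_or_vram)
			return ATTACH_IMPORT_VIA_DMABUF;
		return ATTACH_UNSUPPORTED;
	}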
942 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, in add_kgd_mem_to_kfd_bo_list() argument
946 struct ttm_validate_buffer *entry = &mem->validate_list; in add_kgd_mem_to_kfd_bo_list()
947 struct amdgpu_bo *bo = mem->bo; in add_kgd_mem_to_kfd_bo_list()
960 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, in remove_kgd_mem_from_kfd_bo_list() argument
965 bo_list_entry = &mem->validate_list; in remove_kgd_mem_from_kfd_bo_list()
983 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, in init_user_pages() argument
986 struct amdkfd_process_info *process_info = mem->process_info; in init_user_pages()
987 struct amdgpu_bo *bo = mem->bo; in init_user_pages()
1014 atomic_inc(&mem->invalid); in init_user_pages()
1030 amdgpu_bo_placement_from_domain(bo, mem->domain); in init_user_pages()
1069 * @mem: KFD BO structure.
1073 static int reserve_bo_and_vm(struct kgd_mem *mem, in reserve_bo_and_vm() argument
1077 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_vm()
1084 ctx->sync = &mem->sync; in reserve_bo_and_vm()
1115 * @mem: KFD BO structure.
1123 static int reserve_bo_and_cond_vms(struct kgd_mem *mem, in reserve_bo_and_cond_vms() argument
1127 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_cond_vms()
1135 ctx->sync = &mem->sync; in reserve_bo_and_cond_vms()
1140 list_for_each_entry(entry, &mem->attachments, list) { in reserve_bo_and_cond_vms()
1162 list_for_each_entry(entry, &mem->attachments, list) { in reserve_bo_and_cond_vms()
1216 static void unmap_bo_from_gpuvm(struct kgd_mem *mem, in unmap_bo_from_gpuvm() argument
1230 kfd_mem_dmaunmap_attachment(mem, entry); in unmap_bo_from_gpuvm()
1233 static int update_gpuvm_pte(struct kgd_mem *mem, in update_gpuvm_pte() argument
1241 ret = kfd_mem_dmamap_attachment(mem, entry); in update_gpuvm_pte()
1255 static int map_bo_to_gpuvm(struct kgd_mem *mem, in map_bo_to_gpuvm() argument
1275 ret = update_gpuvm_pte(mem, entry, sync); in map_bo_to_gpuvm()
1284 unmap_bo_from_gpuvm(mem, entry, sync); in map_bo_to_gpuvm()
1629 void *drm_priv, struct kgd_mem **mem, in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() argument
1675 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1676 if (!*mem) { in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1680 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1681 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1682 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1688 if ((*mem)->aql_queue) in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1691 (*mem)->alloc_flags = flags; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1693 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1721 bo->kfd_bo = *mem; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1722 (*mem)->bo = bo; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1726 (*mem)->va = va; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1727 (*mem)->domain = domain; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1728 (*mem)->mapped_to_gpu_memory = 0; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1729 (*mem)->process_info = avm->process_info; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1730 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1734 ret = init_user_pages(*mem, user_addr, criu_resume); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1755 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1758 /* Don't unreserve system mem limit twice */ in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1763 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1767 kfree(*mem); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1777 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu() argument
1780 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1781 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1782 bool use_release_notifier = (mem->bo->kfd_bo == mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1790 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1793 if (mem->alloc_flags & in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1796 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1799 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1800 is_imported = mem->is_imported; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1801 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1802 /* lock is not needed after this, since mem is unused and will in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1808 mem->va, bo_size); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1813 bo_list_entry = &mem->validate_list; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1819 amdgpu_mn_unregister(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1821 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1829 amdgpu_amdkfd_remove_eviction_fence(mem->bo, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1831 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1832 mem->va + bo_size * (1 + mem->aql_queue)); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1835 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1841 amdgpu_sync_free(&mem->sync); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1846 if (mem->bo->tbo.sg) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1847 sg_free_table(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1848 kfree(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1855 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) && in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1863 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1864 if (mem->dmabuf) in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1865 dma_buf_put(mem->dmabuf); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1866 mutex_destroy(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1869 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1872 drm_gem_object_put(&mem->bo->tbo.base); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1879 kfree(mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1885 struct amdgpu_device *adev, struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu() argument
1897 bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1907 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1915 is_invalid_userptr = atomic_read(&mem->invalid); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1919 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1921 domain = mem->domain; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1925 mem->va, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1926 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1929 if (!kfd_mem_is_attached(avm, mem)) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1930 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1935 ret = reserve_bo_and_vm(mem, avm, &ctx); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1952 if (mem->mapped_to_gpu_memory == 0 && in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1965 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1972 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1986 mem->mapped_to_gpu_memory++; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
1988 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2002 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2003 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2008 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu() argument
2012 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2017 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2019 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2033 mem->va, in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2034 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2037 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2044 unmap_bo_from_gpuvm(mem, entry, ctx.sync); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2047 mem->mapped_to_gpu_memory--; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2049 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2055 if (mem->mapped_to_gpu_memory == 0 && in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2056 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2057 !mem->bo->tbo.pin_count) in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2058 amdgpu_amdkfd_remove_eviction_fence(mem->bo, in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2064 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2069 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) in amdgpu_amdkfd_gpuvm_sync_memory() argument
2076 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
2077 amdgpu_sync_clone(&mem->sync, &sync); in amdgpu_amdkfd_gpuvm_sync_memory()
2078 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
2135 * @mem: Buffer object to be mapped for CPU access
2146 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() argument
2150 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2157 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2178 bo, mem->process_info->eviction_fence); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2185 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2193 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2200 * @mem: Buffer object to be unmapped for CPU access
2206 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() argument
2208 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel()
2217 struct kfd_vm_fault_info *mem) in amdgpu_amdkfd_gpuvm_get_vm_fault_info() argument
2220 *mem = *adev->gmc.vm_fault_info; in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2230 struct kgd_mem **mem, uint64_t *size, in amdgpu_amdkfd_gpuvm_import_dmabuf() argument
2253 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_gpuvm_import_dmabuf()
2254 if (!*mem) in amdgpu_amdkfd_gpuvm_import_dmabuf()
2259 kfree(mem); in amdgpu_amdkfd_gpuvm_import_dmabuf()
2269 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_gpuvm_import_dmabuf()
2270 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_import_dmabuf()
2272 (*mem)->alloc_flags = in amdgpu_amdkfd_gpuvm_import_dmabuf()
2279 (*mem)->bo = bo; in amdgpu_amdkfd_gpuvm_import_dmabuf()
2280 (*mem)->va = va; in amdgpu_amdkfd_gpuvm_import_dmabuf()
2281 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? in amdgpu_amdkfd_gpuvm_import_dmabuf()
2283 (*mem)->mapped_to_gpu_memory = 0; in amdgpu_amdkfd_gpuvm_import_dmabuf()
2284 (*mem)->process_info = avm->process_info; in amdgpu_amdkfd_gpuvm_import_dmabuf()
2285 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); in amdgpu_amdkfd_gpuvm_import_dmabuf()
2286 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_import_dmabuf()
2287 (*mem)->is_imported = true; in amdgpu_amdkfd_gpuvm_import_dmabuf()
2303 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, in amdgpu_amdkfd_evict_userptr() argument
2306 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_evict_userptr()
2314 atomic_inc(&mem->invalid); in amdgpu_amdkfd_evict_userptr()
2337 struct kgd_mem *mem, *tmp_mem; in update_invalid_user_pages() local
2345 list_for_each_entry_safe(mem, tmp_mem, in update_invalid_user_pages()
2348 if (!atomic_read(&mem->invalid)) in update_invalid_user_pages()
2351 bo = mem->bo; in update_invalid_user_pages()
2364 list_move_tail(&mem->validate_list.head, in update_invalid_user_pages()
2372 list_for_each_entry(mem, &process_info->userptr_inval_list, in update_invalid_user_pages()
2376 invalid = atomic_read(&mem->invalid); in update_invalid_user_pages()
2383 bo = mem->bo; in update_invalid_user_pages()
2412 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid) in update_invalid_user_pages()
2433 struct kgd_mem *mem, *tmp_mem; in validate_invalid_user_pages() local
2457 list_for_each_entry(mem, &process_info->userptr_inval_list, in validate_invalid_user_pages()
2459 list_add_tail(&mem->resv_list.head, &resv_list); in validate_invalid_user_pages()
2460 mem->resv_list.bo = mem->validate_list.bo; in validate_invalid_user_pages()
2461 mem->resv_list.num_shared = mem->validate_list.num_shared; in validate_invalid_user_pages()
2477 list_for_each_entry_safe(mem, tmp_mem, in validate_invalid_user_pages()
2482 bo = mem->bo; in validate_invalid_user_pages()
2486 amdgpu_bo_placement_from_domain(bo, mem->domain); in validate_invalid_user_pages()
2494 list_move_tail(&mem->validate_list.head, in validate_invalid_user_pages()
2503 list_for_each_entry(attachment, &mem->attachments, list) { in validate_invalid_user_pages()
2507 kfd_mem_dmaunmap_attachment(mem, attachment); in validate_invalid_user_pages()
2508 ret = update_gpuvm_pte(mem, attachment, &sync); in validate_invalid_user_pages()
2512 atomic_inc(&mem->invalid); in validate_invalid_user_pages()
2630 struct kgd_mem *mem; in amdgpu_amdkfd_gpuvm_restore_process_bos() local
2658 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2661 list_add_tail(&mem->resv_list.head, &ctx.list); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2662 mem->resv_list.bo = mem->validate_list.bo; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2663 mem->resv_list.num_shared = mem->validate_list.num_shared; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2687 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2690 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2691 uint32_t domain = mem->domain; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2717 list_for_each_entry(attachment, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
2721 kfd_mem_dmaunmap_attachment(mem, attachment); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2722 ret = update_gpuvm_pte(mem, attachment, &sync_obj); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2761 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2763 if (mem->bo->tbo.pin_count) in amdgpu_amdkfd_gpuvm_restore_process_bos()
2766 dma_resv_add_fence(mem->bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2789 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem) in amdgpu_amdkfd_add_gws_to_process() argument
2798 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); in amdgpu_amdkfd_add_gws_to_process()
2799 if (!*mem) in amdgpu_amdkfd_add_gws_to_process()
2802 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
2803 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_add_gws_to_process()
2804 (*mem)->bo = amdgpu_bo_ref(gws_bo); in amdgpu_amdkfd_add_gws_to_process()
2805 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; in amdgpu_amdkfd_add_gws_to_process()
2806 (*mem)->process_info = process_info; in amdgpu_amdkfd_add_gws_to_process()
2807 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false); in amdgpu_amdkfd_add_gws_to_process()
2808 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
2812 mutex_lock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
2835 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
2843 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
2844 amdgpu_sync_free(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
2845 remove_kgd_mem_from_kfd_bo_list(*mem, process_info); in amdgpu_amdkfd_add_gws_to_process()
2847 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
2848 kfree(*mem); in amdgpu_amdkfd_add_gws_to_process()
2849 *mem = NULL; in amdgpu_amdkfd_add_gws_to_process()
2853 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) in amdgpu_amdkfd_remove_gws_from_process() argument
2857 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; in amdgpu_amdkfd_remove_gws_from_process()
2877 kfree(mem); in amdgpu_amdkfd_remove_gws_from_process()
2901 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem) in amdgpu_amdkfd_bo_mapped_to_dev() argument
2905 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_bo_mapped_to_dev()
2918 seq_printf(m, "System mem used %lldM out of %lluM\n", in kfd_debugfs_kfd_mem_limits()
2921 seq_printf(m, "TTM mem used %lldM out of %lluM\n", in kfd_debugfs_kfd_mem_limits()