Lines matching refs:ttm (amdgpu TTM backend). Each hit lists the source line number, the matching code, and the enclosing function; symbol definitions are tagged argument, local, or member.

65 				   struct ttm_tt *ttm,
68 struct ttm_tt *ttm);
239 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem); in amdgpu_ttm_map_buffer()
248 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; in amdgpu_ttm_map_buffer()
472 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); in amdgpu_bo_move()
484 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in amdgpu_bo_move()
501 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); in amdgpu_bo_move()
638 struct ttm_tt ttm; member
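
Nearly every function below begins with the cast struct amdgpu_ttm_tt *gtt = (void *)ttm. That is only safe because, as the hit at line 638 shows, struct ttm_tt is embedded as the first member of struct amdgpu_ttm_tt, so both pointers refer to the same address. A minimal, self-contained user-space sketch of that container pattern; everything except the embedded-first-member idea is a hypothetical stand-in, not the driver's real layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* stand-ins for the TTM structures; only the embedding matters here */
struct ttm_tt { unsigned long num_pages; };

struct amdgpu_ttm_tt {
	struct ttm_tt ttm;	/* must stay the first member for the cast */
	uint64_t userptr;	/* hypothetical extra driver state */
	uint64_t offset;
};

int main(void)
{
	struct amdgpu_ttm_tt gtt_obj = { .ttm = { .num_pages = 4 } };
	struct ttm_tt *ttm = &gtt_obj.ttm;

	/* the cast used throughout the listing: ttm_tt * -> amdgpu_ttm_tt * */
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	assert(gtt == &gtt_obj);
	printf("num_pages = %lu\n", gtt->ttm.num_pages);
	return 0;
}
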
660 struct ttm_tt *ttm = bo->tbo.ttm; in amdgpu_ttm_tt_get_user_pages() local
661 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_get_user_pages()
693 readonly = amdgpu_ttm_tt_is_readonly(ttm); in amdgpu_ttm_tt_get_user_pages()
695 ttm->num_pages, &gtt->range, readonly, in amdgpu_ttm_tt_get_user_pages()
710 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) in amdgpu_ttm_tt_get_user_pages_done() argument
712 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_get_user_pages_done()
719 gtt->userptr, ttm->num_pages); in amdgpu_ttm_tt_get_user_pages_done()
744 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) in amdgpu_ttm_tt_set_user_pages() argument
748 for (i = 0; i < ttm->num_pages; ++i) in amdgpu_ttm_tt_set_user_pages()
749 ttm->pages[i] = pages ? pages[i] : NULL; in amdgpu_ttm_tt_set_user_pages()
758 struct ttm_tt *ttm) in amdgpu_ttm_tt_pin_userptr() argument
761 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_pin_userptr()
768 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, in amdgpu_ttm_tt_pin_userptr()
769 (u64)ttm->num_pages << PAGE_SHIFT, in amdgpu_ttm_tt_pin_userptr()
775 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); in amdgpu_ttm_tt_pin_userptr()
780 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, in amdgpu_ttm_tt_pin_userptr()
781 ttm->num_pages); in amdgpu_ttm_tt_pin_userptr()
786 kfree(ttm->sg); in amdgpu_ttm_tt_pin_userptr()
787 ttm->sg = NULL; in amdgpu_ttm_tt_pin_userptr()
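
Lines 768-787 outline the userptr pin path: wrap the already-faulted user pages in an sg_table, DMA-map the table, then fan the mapped addresses out into the per-page dma_address array. A condensed, hedged sketch of that sequence, assuming kernel context; the owning structure, error labels, and DMA direction handling are simplified and not the driver's exact code:

/* sketch only: assumes <linux/dma-mapping.h>, <linux/scatterlist.h>,
 * <linux/slab.h> and <drm/drm_prime.h> */
static int pin_userptr_sketch(struct device *dev, struct ttm_tt *ttm,
			      enum dma_data_direction direction,
			      dma_addr_t *dma_address)
{
	int r;

	/* wrap the user pages in an sg_table covering one contiguous range */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* map the whole table for device DMA */
	r = dma_map_sgtable(dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	/* expand the mapping into the flat per-page address array */
	drm_prime_sg_to_dma_addr_array(ttm->sg, dma_address, ttm->num_pages);
	return 0;

release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
}

The unpin path at lines 804-816 reverses this with dma_unmap_sgtable() and sg_free_table() before the page references are dropped.
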
795 struct ttm_tt *ttm) in amdgpu_ttm_tt_unpin_userptr() argument
798 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_unpin_userptr()
804 if (!ttm->sg || !ttm->sg->sgl) in amdgpu_ttm_tt_unpin_userptr()
808 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); in amdgpu_ttm_tt_unpin_userptr()
809 sg_free_table(ttm->sg); in amdgpu_ttm_tt_unpin_userptr()
815 for (i = 0; i < ttm->num_pages; i++) { in amdgpu_ttm_tt_unpin_userptr()
816 if (ttm->pages[i] != in amdgpu_ttm_tt_unpin_userptr()
821 WARN((i == ttm->num_pages), "Missing get_user_page_done\n"); in amdgpu_ttm_tt_unpin_userptr()
831 struct ttm_tt *ttm = tbo->ttm; in amdgpu_ttm_gart_bind() local
832 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_gart_bind()
842 gtt->ttm.dma_address, flags); in amdgpu_ttm_gart_bind()
855 ttm->num_pages - page_idx, in amdgpu_ttm_gart_bind()
856 &(gtt->ttm.dma_address[page_idx]), flags); in amdgpu_ttm_gart_bind()
858 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, in amdgpu_ttm_gart_bind()
859 gtt->ttm.dma_address, flags); in amdgpu_ttm_gart_bind()
865 ttm->num_pages, gtt->offset); in amdgpu_ttm_gart_bind()
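
The amdgpu_ttm_gart_bind() hits show that a bind boils down to amdgpu_gart_bind() with a GART offset, a page count, the dma_address array, and PTE flags (lines 858-859); lines 855-856 bind the tail of the range separately by indexing into that array. A hedged sketch of such a split bind, assuming the driver context and the five-argument amdgpu_gart_bind() seen in the hits; why the two halves get different flags is left out here:

/* sketch only: bind the first page_idx pages with one set of PTE flags and
 * the remainder with another, mirroring the indexed bind at lines 855-856 */
static int gart_bind_split_sketch(struct amdgpu_device *adev,
				  struct ttm_tt *ttm, uint64_t gart_offset,
				  dma_addr_t *dma_address,
				  unsigned int page_idx,
				  uint64_t flags, uint64_t flags_rest)
{
	int r;

	r = amdgpu_gart_bind(adev, gart_offset, page_idx, dma_address, flags);
	if (r)
		return r;

	/* the second half starts page_idx pages further into both spaces */
	return amdgpu_gart_bind(adev,
				gart_offset + ((uint64_t)page_idx << PAGE_SHIFT),
				ttm->num_pages - page_idx,
				&dma_address[page_idx], flags_rest);
}
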
877 struct ttm_tt *ttm, in amdgpu_ttm_backend_bind() argument
881 struct amdgpu_ttm_tt *gtt = (void*)ttm; in amdgpu_ttm_backend_bind()
892 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); in amdgpu_ttm_backend_bind()
897 } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) { in amdgpu_ttm_backend_bind()
898 if (!ttm->sg) { in amdgpu_ttm_backend_bind()
907 ttm->sg = sgt; in amdgpu_ttm_backend_bind()
910 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, in amdgpu_ttm_backend_bind()
911 ttm->num_pages); in amdgpu_ttm_backend_bind()
914 if (!ttm->num_pages) { in amdgpu_ttm_backend_bind()
916 ttm->num_pages, bo_mem, ttm); in amdgpu_ttm_backend_bind()
931 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); in amdgpu_ttm_backend_bind()
935 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, in amdgpu_ttm_backend_bind()
936 gtt->ttm.dma_address, flags); in amdgpu_ttm_backend_bind()
940 ttm->num_pages, gtt->offset); in amdgpu_ttm_backend_bind()
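
Inside amdgpu_ttm_backend_bind(), the TTM_PAGE_FLAG_SG branch (lines 897-911) handles dma-buf imports: if the exporter has not been mapped yet, map the attachment and expand the resulting sg_table into the flat dma_address array. dma_buf_map_attachment() does not appear in the hits; it is assumed here as the standard counterpart of the dma_buf_unmap_attachment() call at line 1046, so treat this as a sketch under that assumption:

/* sketch only: assumes kernel context (<linux/dma-buf.h>, <drm/drm_prime.h>)
 * and that the attachment is reachable as at line 1042 (gobj->import_attach) */
static int bind_sg_import_sketch(struct ttm_tt *ttm,
				 struct dma_buf_attachment *attach,
				 dma_addr_t *dma_address)
{
	struct sg_table *sgt;

	if (ttm->sg)
		return 0;	/* already populated elsewhere */

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	ttm->sg = sgt;
	/* expand the sg_table into the per-page address array */
	drm_prime_sg_to_dma_addr_array(ttm->sg, dma_address, ttm->num_pages);
	return 0;
}
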
957 struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; in amdgpu_ttm_alloc_gart()
988 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp); in amdgpu_ttm_alloc_gart()
1017 if (!tbo->ttm) in amdgpu_ttm_recover_gart()
1020 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource); in amdgpu_ttm_recover_gart()
1033 struct ttm_tt *ttm) in amdgpu_ttm_backend_unbind() argument
1036 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_backend_unbind()
1041 amdgpu_ttm_tt_unpin_userptr(bdev, ttm); in amdgpu_ttm_backend_unbind()
1042 } else if (ttm->sg && gtt->gobj->import_attach) { in amdgpu_ttm_backend_unbind()
1046 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); in amdgpu_ttm_backend_unbind()
1047 ttm->sg = NULL; in amdgpu_ttm_backend_unbind()
1057 r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); in amdgpu_ttm_backend_unbind()
1060 gtt->ttm.num_pages, gtt->offset); in amdgpu_ttm_backend_unbind()
1065 struct ttm_tt *ttm) in amdgpu_ttm_backend_destroy() argument
1067 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_backend_destroy()
1069 amdgpu_ttm_backend_unbind(bdev, ttm); in amdgpu_ttm_backend_destroy()
1070 ttm_tt_destroy_common(bdev, ttm); in amdgpu_ttm_backend_destroy()
1074 ttm_tt_fini(&gtt->ttm); in amdgpu_ttm_backend_destroy()
1105 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) { in amdgpu_ttm_tt_create()
1109 return &gtt->ttm; in amdgpu_ttm_tt_create()
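
The create hits at lines 1105-1109 show the allocation pattern for the wrapper object: initialise the embedded ttm_tt with ttm_sg_tt_init() and return only the embedded member to TTM. A hedged sketch of that shape; the kzalloc and the choice of caching are implied by the surrounding driver code rather than shown in the hits:

/* sketch only: assumes kernel context */
static struct ttm_tt *tt_create_sketch(struct ttm_buffer_object *bo,
				       uint32_t page_flags,
				       enum ttm_caching caching)
{
	struct amdgpu_ttm_tt *gtt;

	gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);
	if (!gtt)
		return NULL;

	/* sets up the embedded ttm_tt, including its dma_address storage */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}

	/* TTM only ever sees the embedded ttm_tt; the driver casts back */
	return &gtt->ttm;
}
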
1119 struct ttm_tt *ttm, in amdgpu_ttm_tt_populate() argument
1123 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_populate()
1127 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); in amdgpu_ttm_tt_populate()
1128 if (!ttm->sg) in amdgpu_ttm_tt_populate()
1133 if (ttm->page_flags & TTM_PAGE_FLAG_SG) in amdgpu_ttm_tt_populate()
1136 return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); in amdgpu_ttm_tt_populate()
1146 struct ttm_tt *ttm) in amdgpu_ttm_tt_unpopulate() argument
1148 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_unpopulate()
1152 amdgpu_ttm_tt_set_user_pages(ttm, NULL); in amdgpu_ttm_tt_unpopulate()
1153 kfree(ttm->sg); in amdgpu_ttm_tt_unpopulate()
1154 ttm->sg = NULL; in amdgpu_ttm_tt_unpopulate()
1158 if (ttm->page_flags & TTM_PAGE_FLAG_SG) in amdgpu_ttm_tt_unpopulate()
1162 return ttm_pool_free(&adev->mman.bdev.pool, ttm); in amdgpu_ttm_tt_unpopulate()
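
Populate and unpopulate are symmetric. Userptr objects only get an empty sg_table allocated (their pages arrive later via the userptr path), SG-backed imports are skipped entirely, and everything else goes through the shared TTM page pool. A condensed sketch of the populate branch structure, mirroring the calls visible in the hits:

/* sketch only: gtt->userptr marks a userptr object, as in the listing */
static int tt_populate_sketch(struct amdgpu_device *adev, struct ttm_tt *ttm,
			      struct ttm_operation_ctx *ctx)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->userptr) {
		/* pages are filled in later by the userptr path */
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		return ttm->sg ? 0 : -ENOMEM;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return 0;	/* dma-buf import: pages come from the exporter */

	return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
}

amdgpu_ttm_tt_unpopulate() (lines 1148-1162) mirrors each branch: it clears the user-page array and frees the sg_table for userptr objects, skips SG imports, and otherwise returns the pages via ttm_pool_free().
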
1181 if (!bo->ttm) { in amdgpu_ttm_tt_set_userptr()
1183 bo->ttm = amdgpu_ttm_tt_create(bo, 0); in amdgpu_ttm_tt_set_userptr()
1184 if (bo->ttm == NULL) in amdgpu_ttm_tt_set_userptr()
1189 bo->ttm->page_flags |= TTM_PAGE_FLAG_SG; in amdgpu_ttm_tt_set_userptr()
1191 gtt = (void *)bo->ttm; in amdgpu_ttm_tt_set_userptr()
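
amdgpu_ttm_tt_set_userptr() shows how a userptr BO gets its ttm_tt lazily: create it on first use, mark it TTM_PAGE_FLAG_SG so the page pool never allocates backing pages for it, then stash the user address in the wrapper. A hedged sketch of that sequence; addr and flags stand in for the function's parameters, and the wrapper field names beyond userptr are assumptions:

/* sketch only: mirrors the flow of lines 1181-1191 */
static int set_userptr_sketch(struct ttm_buffer_object *bo, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* userptr BOs get their ttm_tt on first use */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (!bo->ttm)
			return -ENOMEM;
	}

	/* keep the TTM page pool away from this object; pages come from userspace */
	bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;

	gtt = (void *)bo->ttm;
	gtt->userptr = addr;
	gtt->userflags = flags;	/* hypothetical field name, not shown in the hits */
	return 0;
}
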
1206 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) in amdgpu_ttm_tt_get_usermm() argument
1208 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_get_usermm()
1224 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, in amdgpu_ttm_tt_affect_userptr() argument
1227 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_affect_userptr()
1236 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE; in amdgpu_ttm_tt_affect_userptr()
1246 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) in amdgpu_ttm_tt_is_userptr() argument
1248 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_is_userptr()
1259 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) in amdgpu_ttm_tt_is_readonly() argument
1261 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_is_readonly()
1277 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) in amdgpu_ttm_tt_pde_flags() argument
1288 if (ttm->caching == ttm_cached) in amdgpu_ttm_tt_pde_flags()
1308 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, in amdgpu_ttm_tt_pte_flags() argument
1311 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); in amdgpu_ttm_tt_pte_flags()
1316 if (!amdgpu_ttm_tt_is_readonly(ttm)) in amdgpu_ttm_tt_pte_flags()
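
The two flag helpers at the end split the work: amdgpu_ttm_tt_pde_flags() derives placement and caching bits (line 1288 keys snooping off ttm->caching), and amdgpu_ttm_tt_pte_flags() layers the access bits on top, withholding the writeable bit for read-only userptr mappings (line 1316). A hedged sketch of the PTE side; the AMDGPU_PTE_* names are the usual amdgpu flag conventions, not taken from the hits:

/* sketch only: flag names assumed from the amdgpu PTE conventions */
static uint64_t pte_flags_sketch(struct amdgpu_device *adev,
				 struct ttm_tt *ttm, struct ttm_resource *mem)
{
	/* placement/caching bits first (valid, system, snooped, ...) */
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	/* the real helper also folds in device-wide GART PTE bits from adev */
	(void)adev;

	/* every mapping is at least readable */
	flags |= AMDGPU_PTE_READABLE;

	/* read-only userptr mappings must not get the writeable bit */
	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}
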