
Searched refs:tbo (Results 1 – 25 of 62) sorted by relevance


/Linux-v5.4/drivers/gpu/drm/qxl/
qxl_object.c
30 static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) in qxl_ttm_bo_destroy() argument
35 bo = to_qxl_bo(tbo); in qxl_ttm_bo_destroy()
36 qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private; in qxl_ttm_bo_destroy()
43 drm_gem_object_release(&bo->tbo.base); in qxl_ttm_bo_destroy()
98 r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size); in qxl_bo_create()
113 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, in qxl_bo_create()
138 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); in qxl_bo_kmap()
151 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; in qxl_bo_kmap_atomic_page()
156 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) in qxl_bo_kmap_atomic_page()
158 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV) in qxl_bo_kmap_atomic_page()
[all …]
qxl_object.h
34 r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); in qxl_bo_reserve()
37 struct drm_device *ddev = bo->tbo.base.dev; in qxl_bo_reserve()
48 ttm_bo_unreserve(&bo->tbo); in qxl_bo_unreserve()
53 return bo->tbo.offset; in qxl_bo_gpu_offset()
58 return bo->tbo.num_pages << PAGE_SHIFT; in qxl_bo_size()
63 return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); in qxl_bo_mmap_offset()
71 r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); in qxl_bo_wait()
74 struct drm_device *ddev = bo->tbo.base.dev; in qxl_bo_wait()
82 *mem_type = bo->tbo.mem.mem_type; in qxl_bo_wait()
84 r = ttm_bo_wait(&bo->tbo, true, no_wait); in qxl_bo_wait()
[all …]
qxl_gem.c
35 struct ttm_buffer_object *tbo; in qxl_gem_object_free() local
41 tbo = &qobj->tbo; in qxl_gem_object_free()
42 ttm_bo_put(tbo); in qxl_gem_object_free()
66 *obj = &qbo->tbo.base; in qxl_gem_object_create()
qxl_drv.h
74 struct ttm_buffer_object tbo; member
96 #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
97 #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
310 (bo->tbo.mem.mem_type == TTM_PL_VRAM) in qxl_bo_physical_address()
313 WARN_ON_ONCE((bo->tbo.offset & slot->gpu_offset) != slot->gpu_offset); in qxl_bo_physical_address()
316 return slot->high_bits | (bo->tbo.offset - slot->gpu_offset + offset); in qxl_bo_physical_address()
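The qxl hits above all revolve around one idiom: struct qxl_bo embeds a struct ttm_buffer_object as its tbo member (qxl_drv.h, line 74), and the to_qxl_bo()/gem_to_qxl_bo() macros recover the enclosing qxl_bo from a TTM or GEM pointer with container_of(). Below is a minimal, user-space sketch of that idiom; the demo_* struct and macro names are stand-ins for illustration, not the real qxl definitions.

/* Stand-alone illustration of the container_of() idiom behind
 * to_qxl_bo()/gem_to_qxl_bo(); the structs below are placeholders,
 * not the real kernel definitions.  Builds with a plain C compiler. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_ttm_buffer_object {
	unsigned long num_pages;
};

struct demo_qxl_bo {
	int pin_count;
	struct demo_ttm_buffer_object tbo;	/* embedded, like qxl_bo.tbo */
};

/* Mirrors: #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo) */
#define to_demo_qxl_bo(tobj) container_of((tobj), struct demo_qxl_bo, tbo)

int main(void)
{
	struct demo_qxl_bo bo = { .pin_count = 1, .tbo = { .num_pages = 4 } };
	struct demo_ttm_buffer_object *tbo = &bo.tbo;

	/* TTM hands back only the embedded tbo; pointer arithmetic
	 * recovers the driver object that wraps it. */
	printf("pin_count = %d\n", to_demo_qxl_bo(tbo)->pin_count);
	return 0;
}

Because the TTM object is embedded rather than pointed to, the conversion is pure pointer arithmetic with no extra allocation or lookup, which is why every driver in these results uses the same pattern.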
/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/
amdgpu_object.c
64 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_bo_subtract_pin_size()
66 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { in amdgpu_bo_subtract_pin_size()
70 } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { in amdgpu_bo_subtract_pin_size()
75 static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) in amdgpu_bo_destroy() argument
77 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); in amdgpu_bo_destroy()
78 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); in amdgpu_bo_destroy()
85 if (bo->tbo.base.import_attach) in amdgpu_bo_destroy()
86 drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); in amdgpu_bo_destroy()
87 drm_gem_object_release(&bo->tbo.base); in amdgpu_bo_destroy()
127 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); in amdgpu_bo_placement_from_domain()
[all …]
amdgpu_object.h
85 struct ttm_buffer_object tbo; member
111 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) in ttm_to_amdgpu_bo() argument
113 return container_of(tbo, struct amdgpu_bo, tbo); in ttm_to_amdgpu_bo()
154 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_bo_reserve()
157 r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); in amdgpu_bo_reserve()
168 ttm_bo_unreserve(&bo->tbo); in amdgpu_bo_unreserve()
173 return bo->tbo.num_pages << PAGE_SHIFT; in amdgpu_bo_size()
178 return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; in amdgpu_bo_ngpu_pages()
183 return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; in amdgpu_bo_gpu_page_alignment()
194 return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); in amdgpu_bo_mmap_offset()
[all …]
amdgpu_dma_buf.c
52 int npages = bo->tbo.num_pages; in amdgpu_gem_prime_get_sg_table()
54 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); in amdgpu_gem_prime_get_sg_table()
71 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, in amdgpu_gem_prime_vmap()
108 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_gem_prime_mmap()
122 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || in amdgpu_gem_prime_mmap()
198 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_dma_buf_map_attach()
219 r = __dma_resv_make_exclusive(bo->tbo.base.resv); in amdgpu_dma_buf_map_attach()
254 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_dma_buf_map_detach()
286 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_dma_buf_begin_cpu_access()
303 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_dma_buf_begin_cpu_access()
[all …]
amdgpu_gem.c
88 *obj = &bo->tbo.base; in amdgpu_gem_object_create()
125 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); in amdgpu_gem_object_open()
132 mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm); in amdgpu_gem_object_open()
137 abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) in amdgpu_gem_object_open()
158 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_gem_object_close()
172 tv.bo = &bo->tbo; in amdgpu_gem_object_close()
255 resv = vm->root.base.bo->tbo.base.resv; in amdgpu_gem_create_ioctl()
321 r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); in amdgpu_gem_userptr_ioctl()
332 r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); in amdgpu_gem_userptr_ioctl()
341 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_gem_userptr_ioctl()
[all …]
amdgpu_gtt_mgr.c
35 struct ttm_buffer_object *tbo; member
169 struct ttm_buffer_object *tbo, in amdgpu_gtt_mgr_alloc() argument
220 struct ttm_buffer_object *tbo, in amdgpu_gtt_mgr_new() argument
229 if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) && in amdgpu_gtt_mgr_new()
245 node->tbo = tbo; in amdgpu_gtt_mgr_new()
249 r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem); in amdgpu_gtt_mgr_new()
321 r = amdgpu_ttm_recover_gart(node->tbo); in amdgpu_gtt_mgr_recover()
amdgpu_amdkfd_gpuvm.c
196 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_amdkfd_unreserve_memory_limit()
221 struct dma_resv *resv = bo->tbo.base.resv; in amdgpu_amdkfd_remove_eviction_fence()
280 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), in amdgpu_amdkfd_bo_validate()
286 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_amdkfd_bo_validate()
313 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); in vm_validate_pt_pd_bos()
349 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); in vm_update_pds()
380 unsigned long bo_size = bo->tbo.mem.size; in add_bo_to_vm()
452 entry->bo = &bo->tbo; in add_kgd_mem_to_kfd_bo_list()
494 ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0); in init_user_pages()
507 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); in init_user_pages()
[all …]
amdgpu_gmc.c
44 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_gmc_get_pde_for_bo()
47 switch (bo->tbo.mem.mem_type) { in amdgpu_gmc_get_pde_for_bo()
49 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); in amdgpu_gmc_get_pde_for_bo()
59 *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem); in amdgpu_gmc_get_pde_for_bo()
69 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_gmc_pd_addr()
amdgpu_vm.c
206 if (bo->tbo.type == ttm_bo_type_kernel) in amdgpu_vm_bo_evicted()
305 if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) in amdgpu_vm_bo_base_init()
309 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent) in amdgpu_vm_bo_base_init()
315 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) in amdgpu_vm_bo_base_init()
562 entry->tv.bo = &vm->root.base.bo->tbo; in amdgpu_vm_get_pd_bo()
586 if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) in amdgpu_vm_del_from_lru_notify()
622 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
624 ttm_bo_move_to_lru_tail(&bo->shadow->tbo, in amdgpu_vm_move_to_lru_tail()
661 if (bo->tbo.type != ttm_bo_type_kernel) { in amdgpu_vm_validate_pt_bos()
745 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_vm_clear_bo()
[all …]
amdgpu_cs.c
54 p->uf_entry.tv.bo = &bo->tbo; in amdgpu_cs_user_fence_chunk()
66 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { in amdgpu_cs_user_fence_chunk()
401 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_cs_bo_validate()
405 .resv = bo->tbo.base.resv, in amdgpu_cs_bo_validate()
437 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_cs_bo_validate()
468 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_cs_try_evict()
480 other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); in amdgpu_cs_try_evict()
496 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_cs_try_evict()
541 usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); in amdgpu_cs_list_validate()
545 if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) && in amdgpu_cs_list_validate()
[all …]
amdgpu_mn.c
179 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) in amdgpu_mn_invalidate_node()
182 r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, in amdgpu_mn_invalidate_node()
279 if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, in amdgpu_mn_sync_pagetables_hsa()
378 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_mn_register()
436 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_mn_unregister()
amdgpu_ttm.c
230 return drm_vma_node_verify_access(&abo->tbo.base.vma_node, in amdgpu_verify_access()
789 struct ttm_tt *ttm = bo->tbo.ttm; in amdgpu_ttm_tt_get_user_pages()
998 struct ttm_buffer_object *tbo, in amdgpu_ttm_gart_bind() argument
1001 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo); in amdgpu_ttm_gart_bind()
1002 struct ttm_tt *ttm = tbo->ttm; in amdgpu_ttm_gart_bind()
1151 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) in amdgpu_ttm_recover_gart() argument
1153 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); in amdgpu_ttm_recover_gart()
1157 if (!tbo->ttm) in amdgpu_ttm_recover_gart()
1160 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); in amdgpu_ttm_recover_gart()
1161 r = amdgpu_ttm_gart_bind(adev, tbo, flags); in amdgpu_ttm_recover_gart()
[all …]
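Most of the amdgpu hits follow the same two TTM entry points that appear verbatim in the snippets above: the BO is locked with ttm_bo_reserve(&bo->tbo, ...) and (re)placed with ttm_bo_validate(&bo->tbo, &bo->placement, &ctx). A hedged, kernel-style sketch of that reserve/validate/unreserve sequence follows; it uses only TTM calls visible in these results, while struct demo_bo and demo_validate_bo() are made-up names for illustration, not amdgpu symbols.

/* Illustrative only: mirrors the reserve + validate pattern seen in
 * amdgpu_bo_reserve()/amdgpu_cs_bo_validate() above (v5.4 TTM API).
 * demo_validate_bo() is a made-up helper, not an amdgpu function. */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

struct demo_bo {
	struct ttm_buffer_object tbo;	/* embedded, like amdgpu_bo.tbo */
	struct ttm_placement placement;	/* allowed domains (VRAM/GTT/...) */
};

static int demo_validate_bo(struct demo_bo *bo, bool no_intr)
{
	struct ttm_operation_ctx ctx = { .interruptible = !no_intr };
	int r;

	/* Take the BO's reservation lock before touching placement. */
	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (r)
		return r;

	/* Move/validate the buffer into one of the allowed placements. */
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	ttm_bo_unreserve(&bo->tbo);
	return r;
}

The reserve step is what makes the later field accesses in these results (bo->tbo.mem.mem_type, bo->tbo.offset, and so on) safe against concurrent moves.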
/Linux-v5.4/drivers/gpu/drm/radeon/
radeon_object.c
57 u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; in radeon_update_memory_usage()
75 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) in radeon_ttm_bo_destroy() argument
79 bo = container_of(tbo, struct radeon_bo, tbo); in radeon_ttm_bo_destroy()
81 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); in radeon_ttm_bo_destroy()
88 if (bo->tbo.base.import_attach) in radeon_ttm_bo_destroy()
89 drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); in radeon_ttm_bo_destroy()
90 drm_gem_object_release(&bo->tbo.base); in radeon_ttm_bo_destroy()
212 drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size); in radeon_bo_create()
262 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, in radeon_bo_create()
287 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); in radeon_bo_kmap()
[all …]
radeon_prime.c
37 int npages = bo->tbo.num_pages; in radeon_gem_prime_get_sg_table()
39 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); in radeon_gem_prime_get_sg_table()
47 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, in radeon_gem_prime_vmap()
83 return &bo->tbo.base; in radeon_gem_prime_import_sg_table()
124 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) in radeon_gem_prime_export()
radeon_object.h
68 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); in radeon_bo_reserve()
79 ttm_bo_unreserve(&bo->tbo); in radeon_bo_unreserve()
93 return bo->tbo.offset; in radeon_bo_gpu_offset()
98 return bo->tbo.num_pages << PAGE_SHIFT; in radeon_bo_size()
103 return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; in radeon_bo_ngpu_pages()
108 return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; in radeon_bo_gpu_page_alignment()
119 return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); in radeon_bo_mmap_offset()
radeon_gem.c
86 *obj = &robj->tbo.base; in radeon_gem_object_create()
117 r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); in radeon_gem_set_domain()
333 r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); in radeon_gem_userptr_ioctl()
352 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in radeon_gem_userptr_ioctl()
422 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { in radeon_mode_dumb_mmap()
454 r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true); in radeon_gem_busy_ioctl()
460 cur_placement = READ_ONCE(robj->tbo.mem.mem_type); in radeon_gem_busy_ioctl()
483 ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); in radeon_gem_wait_idle_ioctl()
490 cur_placement = READ_ONCE(robj->tbo.mem.mem_type); in radeon_gem_wait_idle_ioctl()
561 tv.bo = &bo_va->bo->tbo; in radeon_gem_va_update_vm()
[all …]
radeon_mn.c
99 if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) in radeon_mn_invalidate_range_start()
108 r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, in radeon_mn_invalidate_range_start()
114 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in radeon_mn_invalidate_range_start()
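The radeon_object.h helpers above are pure accessors on the embedded tbo: sizes come from tbo.num_pages shifted by PAGE_SHIFT, and the mmap offset comes from the VMA node of the GEM object embedded in the TTM BO (tbo.base). A sketch of those arithmetic helpers is below; struct demo_radeon_bo and the demo_* function names are placeholders, but the field paths match the snippets above.

/* Illustrative accessors mirroring radeon_bo_size()/radeon_bo_mmap_offset();
 * struct demo_radeon_bo is a stand-in for the real radeon_bo (v5.4 layout). */
#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/drm_vma_manager.h>

struct demo_radeon_bo {
	struct ttm_buffer_object tbo;	/* embedded, like radeon_bo.tbo */
};

static inline unsigned long demo_bo_size(struct demo_radeon_bo *bo)
{
	/* num_pages counts CPU pages, so bytes = pages << PAGE_SHIFT. */
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline u64 demo_bo_mmap_offset(struct demo_radeon_bo *bo)
{
	/* Fake offset handed to user space for mmap(), taken from the
	 * GEM object embedded in the TTM BO (tbo.base.vma_node). */
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}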
/Linux-v5.4/drivers/gpu/drm/virtio/
virtgpu_object.c
60 static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) in virtio_gpu_ttm_bo_destroy() argument
65 bo = container_of(tbo, struct virtio_gpu_object, tbo); in virtio_gpu_ttm_bo_destroy()
133 ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size, in virtio_gpu_object_create()
154 mainbuf.bo = &bo->tbo; in virtio_gpu_object_create()
190 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); in virtio_gpu_object_kmap()
201 struct page **pages = bo->tbo.ttm->pages; in virtio_gpu_object_get_sg_table()
202 int nr_pages = bo->tbo.num_pages; in virtio_gpu_object_get_sg_table()
213 if (bo->tbo.ttm->state == tt_unpopulated) in virtio_gpu_object_get_sg_table()
214 bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx); in virtio_gpu_object_get_sg_table()
246 r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); in virtio_gpu_object_wait()
[all …]
virtgpu_prime.c
37 if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages) in virtgpu_gem_prime_get_sg_table()
41 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, in virtgpu_gem_prime_get_sg_table()
42 bo->tbo.ttm->num_pages); in virtgpu_gem_prime_get_sg_table()
virtgpu_drv.h
80 struct ttm_buffer_object tbo; member
382 ttm_bo_get(&bo->tbo); in virtio_gpu_object_ref()
388 struct ttm_buffer_object *tbo; in virtio_gpu_object_unref() local
392 tbo = &((*bo)->tbo); in virtio_gpu_object_unref()
393 ttm_bo_put(tbo); in virtio_gpu_object_unref()
399 return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); in virtio_gpu_object_mmap_offset()
407 r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); in virtio_gpu_object_reserve()
421 ttm_bo_unreserve(&bo->tbo); in virtio_gpu_object_unreserve()
virtgpu_ttm.c
201 virtio_gpu_get_vgdev(gtt->obj->tbo.bdev); in virtio_gpu_ttm_tt_bind()
212 virtio_gpu_get_vgdev(gtt->obj->tbo.bdev); in virtio_gpu_ttm_tt_unbind()
244 gtt->obj = container_of(bo, struct virtio_gpu_object, tbo); in virtio_gpu_ttm_tt_create()
252 static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo) in virtio_gpu_bo_swap_notify() argument
256 bo = container_of(tbo, struct virtio_gpu_object, tbo); in virtio_gpu_bo_swap_notify()
virtgpu_ioctl.c
74 qobj = container_of(bo, struct virtio_gpu_object, tbo); in virtio_gpu_object_list_validate()
92 qobj = container_of(bo, struct virtio_gpu_object, tbo); in virtio_gpu_unref_list()
188 buflist[i].bo = &qobj->tbo; in virtio_gpu_execbuffer_ioctl()
384 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); in virtio_gpu_transfer_from_host_ioctl()
399 dma_resv_add_excl_fence(qobj->tbo.base.resv, in virtio_gpu_transfer_from_host_ioctl()
434 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); in virtio_gpu_transfer_to_host_ioctl()
453 dma_resv_add_excl_fence(qobj->tbo.base.resv, in virtio_gpu_transfer_to_host_ioctl()
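The virtio-gpu ref/unref helpers above lean directly on TTM's reference counting: ttm_bo_get() on the embedded tbo takes a reference, and ttm_bo_put() drops it, with the TTM destroy callback (virtio_gpu_ttm_bo_destroy() above) running when the last reference goes away. A sketch of that pattern, again with a placeholder struct rather than the real virtio_gpu_object:

/* Illustrative ref/unref pair mirroring virtio_gpu_object_ref()/unref()
 * above; struct demo_vgpu_obj is a stand-in, not the real virtio_gpu_object. */
#include <drm/ttm/ttm_bo_api.h>

struct demo_vgpu_obj {
	struct ttm_buffer_object tbo;	/* embedded, like virtio_gpu_object.tbo */
};

static struct demo_vgpu_obj *demo_obj_ref(struct demo_vgpu_obj *obj)
{
	ttm_bo_get(&obj->tbo);		/* take an extra TTM reference */
	return obj;
}

static void demo_obj_unref(struct demo_vgpu_obj **obj)
{
	if (!*obj)
		return;
	/* Drop the reference; the driver's *_ttm_bo_destroy() callback
	 * runs once the last reference is gone. */
	ttm_bo_put(&(*obj)->tbo);
	*obj = NULL;
}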
