
Searched refs:ttm (Results 1 – 25 of 56) sorted by relevance


/Linux-v5.4/drivers/gpu/drm/ttm/
ttm_tt.c
70 bo->ttm = NULL; in ttm_tt_create()
75 bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags); in ttm_tt_create()
76 if (unlikely(bo->ttm == NULL)) in ttm_tt_create()
85 static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm) in ttm_tt_alloc_page_directory() argument
87 ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*), in ttm_tt_alloc_page_directory()
89 if (!ttm->pages) in ttm_tt_alloc_page_directory()
94 static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) in ttm_dma_tt_alloc_page_directory() argument
96 ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages, in ttm_dma_tt_alloc_page_directory()
97 sizeof(*ttm->ttm.pages) + in ttm_dma_tt_alloc_page_directory()
98 sizeof(*ttm->dma_address), in ttm_dma_tt_alloc_page_directory()
[all …]
ttm_agp_backend.c
46 struct ttm_tt ttm; member
51 static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) in ttm_agp_bind() argument
53 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); in ttm_agp_bind()
54 struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page; in ttm_agp_bind()
60 mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY); in ttm_agp_bind()
65 for (i = 0; i < ttm->num_pages; i++) { in ttm_agp_bind()
66 struct page *page = ttm->pages[i]; in ttm_agp_bind()
85 static int ttm_agp_unbind(struct ttm_tt *ttm) in ttm_agp_unbind() argument
87 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); in ttm_agp_unbind()
98 static void ttm_agp_destroy(struct ttm_tt *ttm) in ttm_agp_destroy() argument
[all …]
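
Side note: the hits above show the standard v5.4 TTM backend idiom — a driver embeds struct ttm_tt inside its own state struct and recovers the outer struct with container_of() in its bind/unbind/destroy callbacks. A minimal sketch of that pattern, with hypothetical my_backend names (not from the files above):

/* Hypothetical backend embedding struct ttm_tt, mirroring
 * ttm_agp_backend.c above. Kernel context: needs <linux/slab.h>
 * and <drm/ttm/ttm_tt.h>. */
struct my_backend {
	struct ttm_tt ttm;	/* embedded; TTM core passes &be->ttm around */
	void *private_state;	/* driver-specific bookkeeping */
};

static int my_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	/* Recover the outer struct from the embedded member. */
	struct my_backend *be = container_of(ttm, struct my_backend, ttm);

	/* ... map ttm->pages[0..ttm->num_pages) into the aperture
	 * using be->private_state ... */
	return 0;
}
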
ttm_page_alloc.c
1029 ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) in ttm_pool_unpopulate_helper() argument
1031 struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; in ttm_pool_unpopulate_helper()
1038 if (!ttm->pages[i]) in ttm_pool_unpopulate_helper()
1041 ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE); in ttm_pool_unpopulate_helper()
1045 ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, in ttm_pool_unpopulate_helper()
1046 ttm->caching_state); in ttm_pool_unpopulate_helper()
1047 ttm->state = tt_unpopulated; in ttm_pool_unpopulate_helper()
1050 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) in ttm_pool_populate() argument
1052 struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; in ttm_pool_populate()
1056 if (ttm->state != tt_unpopulated) in ttm_pool_populate()
[all …]
ttm_bo_util.c
57 struct ttm_tt *ttm = bo->ttm; in ttm_bo_move_ttm() local
70 ttm_tt_unbind(ttm); in ttm_bo_move_ttm()
77 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); in ttm_bo_move_ttm()
82 ret = ttm_tt_bind(ttm, new_mem, ctx); in ttm_bo_move_ttm()
312 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, in ttm_copy_io_ttm_page() argument
316 struct page *d = ttm->pages[page]; in ttm_copy_io_ttm_page()
334 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, in ttm_copy_ttm_io_page() argument
338 struct page *s = ttm->pages[page]; in ttm_copy_ttm_io_page()
362 struct ttm_tt *ttm = bo->ttm; in ttm_bo_move_memcpy() local
394 (ttm == NULL || (ttm->state == tt_unpopulated && in ttm_bo_move_memcpy()
[all …]
ttm_page_alloc_dma.c
839 struct ttm_tt *ttm = &ttm_dma->ttm; in ttm_dma_pool_get_pages() local
847 ttm->pages[index] = d_page->p; in ttm_dma_pool_get_pages()
859 struct ttm_tt *ttm = &ttm_dma->ttm; in ttm_dma_pool_gfp_flags() local
862 if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) in ttm_dma_pool_gfp_flags()
866 if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) in ttm_dma_pool_gfp_flags()
876 if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY) in ttm_dma_pool_gfp_flags()
889 struct ttm_tt *ttm = &ttm_dma->ttm; in ttm_dma_populate() local
890 struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; in ttm_dma_populate()
891 unsigned long num_pages = ttm->num_pages; in ttm_dma_populate()
898 if (ttm->state != tt_unpopulated) in ttm_dma_populate()
[all …]
Makefile
5 ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
9 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
11 obj-$(CONFIG_DRM_TTM) += ttm.o
ttm_bo_vm.c
118 struct ttm_tt *ttm = NULL; in ttm_bo_vm_fault() local
158 if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { in ttm_bo_vm_fault()
242 ttm = bo->ttm; in ttm_bo_vm_fault()
247 if (ttm_tt_populate(ttm, &ctx)) { in ttm_bo_vm_fault()
263 page = ttm->pages[page_offset]; in ttm_bo_vm_fault()
374 if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { in ttm_bo_vm_access()
375 ret = ttm_tt_swapin(bo->ttm); in ttm_bo_vm_access()
/Linux-v5.4/drivers/gpu/drm/nouveau/
nouveau_sgdma.c
13 struct ttm_dma_tt ttm; member
18 nouveau_sgdma_destroy(struct ttm_tt *ttm) in nouveau_sgdma_destroy() argument
20 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; in nouveau_sgdma_destroy()
22 if (ttm) { in nouveau_sgdma_destroy()
23 ttm_dma_tt_fini(&nvbe->ttm); in nouveau_sgdma_destroy()
29 nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) in nv04_sgdma_bind() argument
31 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; in nv04_sgdma_bind()
35 ret = nouveau_mem_host(reg, &nvbe->ttm); in nv04_sgdma_bind()
50 nv04_sgdma_unbind(struct ttm_tt *ttm) in nv04_sgdma_unbind() argument
52 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; in nv04_sgdma_unbind()
[all …]
nouveau_ttm.c
171 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); in nouveau_ttm_mmap()
185 drm->ttm.type_host[!!kind] = typei; in nouveau_ttm_init_host()
191 drm->ttm.type_ncoh[!!kind] = typei; in nouveau_ttm_init_host()
224 drm->ttm.type_vram = typei; in nouveau_ttm_init()
226 drm->ttm.type_vram = -1; in nouveau_ttm_init()
236 ret = ttm_bo_device_init(&drm->ttm.bdev, in nouveau_ttm_init()
251 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, in nouveau_ttm_init()
258 drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1), in nouveau_ttm_init()
268 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT, in nouveau_ttm_init()
285 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); in nouveau_ttm_fini()
[all …]
nouveau_bo.c
215 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_alloc()
542 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_device()
552 for (i = 0; i < ttm_dma->ttm.num_pages; i++) in nouveau_bo_sync_for_device()
562 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_cpu()
572 for (i = 0; i < ttm_dma->ttm.num_pages; i++) in nouveau_bo_sync_for_cpu()
678 const u8 type = mmu->type[drm->ttm.type_vram].type; in nouveau_bo_init_mem_type()
1128 struct nouveau_channel *chan = drm->ttm.chan; in nouveau_bo_move_m2mf()
1146 ret = drm->ttm.move(chan, bo, &bo->mem, new_reg); in nouveau_bo_move_m2mf()
1212 &drm->ttm.copy); in nouveau_bo_move_init()
1214 ret = mthd->init(chan, drm->ttm.copy.handle); in nouveau_bo_move_init()
[all …]
nouveau_mem.c
107 type = drm->ttm.type_ncoh[!!mem->kind]; in nouveau_mem_host()
109 type = drm->ttm.type_host[0]; in nouveau_mem_host()
119 if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl; in nouveau_mem_host()
148 drm->ttm.type_vram, page, size, in nouveau_mem_vram()
156 drm->ttm.type_vram, page, size, in nouveau_mem_vram()
/Linux-v5.4/include/drm/ttm/
ttm_tt.h
63 int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
73 int (*unbind) (struct ttm_tt *ttm);
83 void (*destroy) (struct ttm_tt *ttm);
133 struct ttm_tt ttm; member
161 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
175 void ttm_tt_fini(struct ttm_tt *ttm);
186 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
196 void ttm_tt_destroy(struct ttm_tt *ttm);
205 void ttm_tt_unbind(struct ttm_tt *ttm);
214 int ttm_tt_swapin(struct ttm_tt *ttm);
[all …]
ttm_page_alloc.h
50 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
59 void ttm_pool_unpopulate(struct ttm_tt *ttm);
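
The ttm_tt.h hits list the three hooks making up struct ttm_backend_func in v5.4: bind (line 63), unbind (line 73), and destroy (line 83). Continuing the hypothetical my_backend sketch above, a driver collects them in an ops table and points ttm_tt::func at it from its ttm_tt_create hook — compare the qxl_ttm.c hit further down ("gtt->ttm.func = &qxl_backend_func"):

/* Hypothetical ops table for the v5.4 hooks shown above; the unbind
 * and destroy bodies are elided (destroy would ttm_tt_fini(&be->ttm)
 * then kfree(be)). */
static struct ttm_backend_func my_tt_func = {
	.bind    = my_backend_bind,
	.unbind  = my_backend_unbind,	/* assumed, mirrors my_backend_bind */
	.destroy = my_backend_destroy,
};

static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
				       uint32_t page_flags)
{
	struct my_backend *be = kzalloc(sizeof(*be), GFP_KERNEL);

	if (!be)
		return NULL;
	be->ttm.func = &my_tt_func;	/* set before ttm_tt_init() */
	if (ttm_tt_init(&be->ttm, bo, page_flags)) {
		kfree(be);
		return NULL;
	}
	return &be->ttm;
}
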
/Linux-v5.4/drivers/gpu/drm/radeon/
radeon_ttm.c
185 if (radeon_ttm_tt_has_userptr(bo->ttm)) in radeon_verify_access()
282 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); in radeon_move_vram_ram()
287 r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx); in radeon_move_vram_ram()
358 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in radeon_bo_move()
475 struct ttm_dma_tt ttm; member
485 static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) in radeon_ttm_tt_pin_userptr() argument
487 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); in radeon_ttm_tt_pin_userptr()
488 struct radeon_ttm_tt *gtt = (void *)ttm; in radeon_ttm_tt_pin_userptr()
502 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; in radeon_ttm_tt_pin_userptr()
510 unsigned num_pages = ttm->num_pages - pinned; in radeon_ttm_tt_pin_userptr()
[all …]
/Linux-v5.4/drivers/gpu/drm/virtio/
virtgpu_ttm.c
191 struct ttm_dma_tt ttm; member
195 static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm, in virtio_gpu_ttm_tt_bind() argument
199 container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); in virtio_gpu_ttm_tt_bind()
207 static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm) in virtio_gpu_ttm_tt_unbind() argument
210 container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); in virtio_gpu_ttm_tt_unbind()
218 static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm) in virtio_gpu_ttm_tt_destroy() argument
221 container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); in virtio_gpu_ttm_tt_destroy()
223 ttm_dma_tt_fini(&gtt->ttm); in virtio_gpu_ttm_tt_destroy()
243 gtt->ttm.ttm.func = &virtio_gpu_tt_func; in virtio_gpu_ttm_tt_create()
245 if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) { in virtio_gpu_ttm_tt_create()
[all …]
virtgpu_prime.c
37 if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages) in virtgpu_gem_prime_get_sg_table()
41 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, in virtgpu_gem_prime_get_sg_table()
42 bo->tbo.ttm->num_pages); in virtgpu_gem_prime_get_sg_table()
virtgpu_object.c
201 struct page **pages = bo->tbo.ttm->pages; in virtio_gpu_object_get_sg_table()
213 if (bo->tbo.ttm->state == tt_unpopulated) in virtio_gpu_object_get_sg_table()
214 bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx); in virtio_gpu_object_get_sg_table()
/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
228 if (amdgpu_ttm_tt_get_usermm(bo->ttm)) in amdgpu_verify_access()
513 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); in amdgpu_move_vram_ram()
519 r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx); in amdgpu_move_vram_ram()
632 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in amdgpu_bo_move()
765 struct ttm_dma_tt ttm; member
789 struct ttm_tt *ttm = bo->tbo.ttm; in amdgpu_ttm_tt_get_user_pages() local
790 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_get_user_pages()
825 pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL); in amdgpu_ttm_tt_get_user_pages()
833 range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ? in amdgpu_ttm_tt_get_user_pages()
838 range->end = start + ttm->num_pages * PAGE_SIZE; in amdgpu_ttm_tt_get_user_pages()
[all …]
amdgpu_ttm.h
108 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
115 static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) in amdgpu_ttm_tt_get_user_pages_done() argument
121 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
122 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
124 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
125 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
126 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
128 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
130 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
131 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
[all …]
amdgpu_gmc.c
45 struct ttm_dma_tt *ttm; in amdgpu_gmc_get_pde_for_bo() local
49 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); in amdgpu_gmc_get_pde_for_bo()
50 *addr = ttm->dma_address[0]; in amdgpu_gmc_get_pde_for_bo()
59 *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem); in amdgpu_gmc_get_pde_for_bo()
122 struct ttm_dma_tt *ttm; in amdgpu_gmc_agp_addr() local
124 if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached) in amdgpu_gmc_agp_addr()
127 ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm); in amdgpu_gmc_agp_addr()
128 if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) in amdgpu_gmc_agp_addr()
131 return adev->gmc.agp_start + ttm->dma_address[0]; in amdgpu_gmc_agp_addr()
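
The amdgpu_gmc.c hits show the companion idiom for DMA-mapped tables: struct ttm_dma_tt embeds struct ttm_tt plus a dma_address array, so the same container_of() step recovers per-page bus addresses. A minimal sketch with a hypothetical helper name:

/* Hypothetical helper mirroring amdgpu_gmc.c lines 49–50 above:
 * fetch the DMA address of a BO's first page. Only meaningful once
 * the tt has been populated and DMA-mapped. */
static dma_addr_t my_bo_first_dma_addr(struct ttm_buffer_object *bo)
{
	struct ttm_dma_tt *dma_tt =
		container_of(bo->ttm, struct ttm_dma_tt, ttm);

	return dma_tt->dma_address[0];
}
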
amdgpu_dma_buf.c
54 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); in amdgpu_gem_prime_get_sg_table()
122 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || in amdgpu_gem_prime_mmap()
339 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || in amdgpu_gem_prime_export()
389 bo->tbo.ttm->sg = sg; in amdgpu_gem_prime_import_sg_table()
/Linux-v5.4/drivers/gpu/drm/vmwgfx/
vmwgfx_ttm_buffer.c
427 vsgt->pages = vmw_tt->dma_ttm.ttm.pages; in vmw_ttm_map_dma()
428 vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages; in vmw_ttm_map_dma()
536 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); in vmw_bo_map_dma()
553 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); in vmw_bo_unmap_dma()
573 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); in vmw_bo_sg_table()
579 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) in vmw_ttm_bind() argument
582 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); in vmw_ttm_bind()
595 ttm->num_pages, vmw_be->gmr_id); in vmw_ttm_bind()
599 vmw_mob_create(ttm->num_pages); in vmw_ttm_bind()
605 &vmw_be->vsgt, ttm->num_pages, in vmw_ttm_bind()
[all …]
vmwgfx_blit.c
466 if (dst->ttm->state == tt_unpopulated) { in vmw_bo_cpu_blit()
467 ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx); in vmw_bo_cpu_blit()
472 if (src->ttm->state == tt_unpopulated) { in vmw_bo_cpu_blit()
473 ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx); in vmw_bo_cpu_blit()
482 d.dst_pages = dst->ttm->pages; in vmw_bo_cpu_blit()
483 d.src_pages = src->ttm->pages; in vmw_bo_cpu_blit()
/Linux-v5.4/drivers/gpu/drm/qxl/
qxl_ttm.c
204 struct ttm_tt ttm; member
209 static int qxl_ttm_backend_bind(struct ttm_tt *ttm, in qxl_ttm_backend_bind() argument
212 struct qxl_ttm_tt *gtt = (void *)ttm; in qxl_ttm_backend_bind()
215 if (!ttm->num_pages) { in qxl_ttm_backend_bind()
217 ttm->num_pages, bo_mem, ttm); in qxl_ttm_backend_bind()
223 static int qxl_ttm_backend_unbind(struct ttm_tt *ttm) in qxl_ttm_backend_unbind() argument
229 static void qxl_ttm_backend_destroy(struct ttm_tt *ttm) in qxl_ttm_backend_destroy() argument
231 struct qxl_ttm_tt *gtt = (void *)ttm; in qxl_ttm_backend_destroy()
233 ttm_tt_fini(&gtt->ttm); in qxl_ttm_backend_destroy()
253 gtt->ttm.func = &qxl_backend_func; in qxl_ttm_tt_create()
[all …]
/Linux-v5.4/arch/powerpc/perf/
ppc970-pmu.c (unrelated match: here "ttm" is a local selecting the PMU event-bus multiplexers, the MMCR1 TTM*SEL fields — not the DRM memory manager)
260 unsigned int ttm, grp; in p970_compute_mmcr() local
317 ttm = unitmap[i]; in p970_compute_mmcr()
318 ++ttmuse[(ttm >> 2) & 1]; in p970_compute_mmcr()
319 mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH; in p970_compute_mmcr()
331 ttm = (unitmap[unit] >> 2) & 1; in p970_compute_mmcr()
333 ttm = 2; in p970_compute_mmcr()
335 ttm = 3; in p970_compute_mmcr()
339 mmcr1 |= (unsigned long)ttm in p970_compute_mmcr()
