Lines Matching +full:dma +full:- +full:pool
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
30 * In addition to that, allocations from the DMA coherent API are pooled as well
35 #include <linux/dma-mapping.h>
50 * struct ttm_pool_dma - Helper object for coherent DMA mappings
52 * @addr: original DMA address returned for the mapping
62 MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
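For orientation, the per-allocation helper that the following lines manipulate can be sketched as below. This is a reconstruction from the fragments in this listing (the vaddr member packs the kernel virtual address together with the allocation order in its low bits, see lines 121 and 341), not a verbatim copy of the file:

	/* Sketch: bookkeeping for one coherent DMA allocation */
	struct ttm_pool_dma {
		dma_addr_t addr;	/* original DMA address returned for the mapping */
		unsigned long vaddr;	/* kernel virtual address | allocation order */
	};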
78 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, in ttm_pool_alloc_page()
82 struct ttm_pool_dma *dma; in ttm_pool_alloc_page()
94 if (!pool->use_dma_alloc) { in ttm_pool_alloc_page()
97 p->private = order; in ttm_pool_alloc_page()
101 dma = kmalloc(sizeof(*dma), GFP_KERNEL); in ttm_pool_alloc_page()
102 if (!dma) in ttm_pool_alloc_page()
108 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
109 &dma->addr, gfp_flags, attr); in ttm_pool_alloc_page()
113 /* TODO: This is an illegal abuse of the DMA API, but we need to rework in ttm_pool_alloc_page()
114 * TTM page fault handling and extend the DMA API to clean this up. in ttm_pool_alloc_page()
121 dma->vaddr = (unsigned long)vaddr | order; in ttm_pool_alloc_page()
122 p->private = (unsigned long)dma; in ttm_pool_alloc_page()
126 kfree(dma); in ttm_pool_alloc_page()
131 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching, in ttm_pool_free_page()
135 struct ttm_pool_dma *dma; in ttm_pool_free_page()
146 if (!pool || !pool->use_dma_alloc) { in ttm_pool_free_page()
154 dma = (void *)p->private; in ttm_pool_free_page()
155 vaddr = (void *)(dma->vaddr & PAGE_MASK); in ttm_pool_free_page()
156 dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr, in ttm_pool_free_page()
158 kfree(dma); in ttm_pool_free_page()
166 unsigned int num_pages = last - first; in ttm_pool_apply_caching()
183 /* Map pages of 1 << order size and fill the DMA address array */
184 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, in ttm_pool_map()
190 if (pool->use_dma_alloc) { in ttm_pool_map()
191 struct ttm_pool_dma *dma = (void *)p->private; in ttm_pool_map()
193 addr = dma->addr; in ttm_pool_map()
197 addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL); in ttm_pool_map()
198 if (dma_mapping_error(pool->dev, addr)) in ttm_pool_map()
199 return -EFAULT; in ttm_pool_map()
202 for (i = 1 << order; i ; --i) { in ttm_pool_map()
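To illustrate the fill loop above: one order-N allocation covers 1 << N pages, and each page's DMA address is simply the base address advanced by PAGE_SIZE. A minimal stand-alone sketch (hypothetical helper, not part of the driver):

	/* With 4 KiB pages, order 2 yields 4 entries: base, base+4K, base+8K, base+12K */
	static void fill_dma_addrs(dma_addr_t base, unsigned int order, dma_addr_t *out)
	{
		unsigned long i;

		for (i = 0; i < (1UL << order); ++i)
			out[i] = base + i * PAGE_SIZE;
	}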
211 static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr, in ttm_pool_unmap()
215 if (pool->use_dma_alloc) in ttm_pool_unmap()
218 dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT, in ttm_pool_unmap()
225 unsigned int i, num_pages = 1 << pt->order; in ttm_pool_type_give()
234 spin_lock(&pt->lock); in ttm_pool_type_give()
235 list_add(&p->lru, &pt->pages); in ttm_pool_type_give()
236 spin_unlock(&pt->lock); in ttm_pool_type_give()
237 atomic_long_add(1 << pt->order, &allocated_pages); in ttm_pool_type_give()
245 spin_lock(&pt->lock); in ttm_pool_type_take()
246 p = list_first_entry_or_null(&pt->pages, typeof(*p), lru); in ttm_pool_type_take()
248 atomic_long_sub(1 << pt->order, &allocated_pages); in ttm_pool_type_take()
249 list_del(&p->lru); in ttm_pool_type_take()
251 spin_unlock(&pt->lock); in ttm_pool_type_take()
256 /* Initialize and add a pool type to the global shrinker list */
257 static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, in ttm_pool_type_init()
260 pt->pool = pool; in ttm_pool_type_init()
261 pt->caching = caching; in ttm_pool_type_init()
262 pt->order = order; in ttm_pool_type_init()
263 spin_lock_init(&pt->lock); in ttm_pool_type_init()
264 INIT_LIST_HEAD(&pt->pages); in ttm_pool_type_init()
267 list_add_tail(&pt->shrinker_list, &shrinker_list); in ttm_pool_type_init()
277 list_del(&pt->shrinker_list); in ttm_pool_type_fini()
281 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); in ttm_pool_type_fini()
285 static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, in ttm_pool_select_type()
289 if (pool->use_dma_alloc) in ttm_pool_select_type()
290 return &pool->caching[caching].orders[order]; in ttm_pool_select_type()
295 if (pool->use_dma32) in ttm_pool_select_type()
300 if (pool->use_dma32) in ttm_pool_select_type()
321 list_move_tail(&pt->shrinker_list, &shrinker_list); in ttm_pool_shrink()
326 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); in ttm_pool_shrink()
327 num_pages = 1 << pt->order; in ttm_pool_shrink()
336 static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p) in ttm_pool_page_order()
338 if (pool->use_dma_alloc) { in ttm_pool_page_order()
339 struct ttm_pool_dma *dma = (void *)p->private; in ttm_pool_page_order()
341 return dma->vaddr & ~PAGE_MASK; in ttm_pool_page_order()
344 return p->private; in ttm_pool_page_order()
348 * ttm_pool_alloc - Fill a ttm_tt object
350 * @pool: ttm_pool to use
354 * Fill the ttm_tt object with pages and also make sure to DMA map them when
359 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, in ttm_pool_alloc()
362 unsigned long num_pages = tt->num_pages; in ttm_pool_alloc()
363 dma_addr_t *dma_addr = tt->dma_address; in ttm_pool_alloc()
364 struct page **caching = tt->pages; in ttm_pool_alloc()
365 struct page **pages = tt->pages; in ttm_pool_alloc()
372 WARN_ON(dma_addr && !pool->dev); in ttm_pool_alloc()
374 if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC) in ttm_pool_alloc()
377 if (ctx->gfp_retry_mayfail) in ttm_pool_alloc()
380 if (pool->use_dma32) in ttm_pool_alloc()
385 for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages)); in ttm_pool_alloc()
391 pt = ttm_pool_select_type(pool, tt->caching, order); in ttm_pool_alloc()
396 p = ttm_pool_alloc_page(pool, gfp_flags, order); in ttm_pool_alloc()
403 --order; in ttm_pool_alloc()
406 r = -ENOMEM; in ttm_pool_alloc()
412 tt->caching); in ttm_pool_alloc()
419 r = ttm_pool_map(pool, order, p, &dma_addr); in ttm_pool_alloc()
424 num_pages -= 1 << order; in ttm_pool_alloc()
425 for (i = 1 << order; i; --i) in ttm_pool_alloc()
429 r = ttm_pool_apply_caching(caching, pages, tt->caching); in ttm_pool_alloc()
436 ttm_pool_free_page(pool, tt->caching, order, p); in ttm_pool_alloc()
439 num_pages = tt->num_pages - num_pages; in ttm_pool_alloc()
441 order = ttm_pool_page_order(pool, tt->pages[i]); in ttm_pool_alloc()
442 ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]); in ttm_pool_alloc()
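A typical caller of ttm_pool_alloc() is a driver's ttm_tt populate hook; a hedged sketch (my_device and to_my_device() are hypothetical, error handling beyond the return value omitted):

	static int my_ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *tt,
				      struct ttm_operation_ctx *ctx)
	{
		struct my_device *mdev = to_my_device(bdev);	/* hypothetical helper */

		/* Fills tt->pages and, when the pool was created with use_dma_alloc,
		 * tt->dma_address as well. */
		return ttm_pool_alloc(&mdev->pool, tt, ctx);
	}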
451 * ttm_pool_free - Free the backing pages from a ttm_tt object
453 * @pool: Pool to give pages back to.
456 * Give the backing pages back to a pool or free them
458 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt) in ttm_pool_free()
462 for (i = 0; i < tt->num_pages; ) { in ttm_pool_free()
463 struct page *p = tt->pages[i]; in ttm_pool_free()
467 order = ttm_pool_page_order(pool, p); in ttm_pool_free()
469 if (tt->dma_address) in ttm_pool_free()
470 ttm_pool_unmap(pool, tt->dma_address[i], num_pages); in ttm_pool_free()
472 pt = ttm_pool_select_type(pool, tt->caching, order); in ttm_pool_free()
474 ttm_pool_type_give(pt, tt->pages[i]); in ttm_pool_free()
476 ttm_pool_free_page(pool, tt->caching, order, in ttm_pool_free()
477 tt->pages[i]); in ttm_pool_free()
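The matching release path hands the pages (and their DMA mappings) back through ttm_pool_free(); same hypothetical driver as in the sketch above:

	static void my_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *tt)
	{
		struct my_device *mdev = to_my_device(bdev);

		ttm_pool_free(&mdev->pool, tt);
	}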
488 * ttm_pool_init - Initialize a pool
490 * @pool: the pool to initialize
491 * @dev: device for DMA allocations and mappings
492 * @use_dma_alloc: true if coherent DMA alloc should be used
495 * Initialize the pool and its pool types.
497 void ttm_pool_init(struct ttm_pool *pool, struct device *dev, in ttm_pool_init()
504 pool->dev = dev; in ttm_pool_init()
505 pool->use_dma_alloc = use_dma_alloc; in ttm_pool_init()
506 pool->use_dma32 = use_dma32; in ttm_pool_init()
511 ttm_pool_type_init(&pool->caching[i].orders[j], in ttm_pool_init()
512 pool, i, j); in ttm_pool_init()
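Usage sketch for pool setup, assuming the parameter list is exactly what the fragments above show (@pool, @dev, use_dma_alloc, use_dma32); other kernel versions may take additional arguments:

	/* Hypothetical driver init: one pool per device, coherent DMA allocations
	 * enabled, no DMA32 restriction. */
	static void my_device_pool_init(struct my_device *mdev, struct device *dev)
	{
		ttm_pool_init(&mdev->pool, dev, true, false);
	}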
517 * ttm_pool_fini - Cleanup a pool
519 * @pool: the pool to clean up
521 * Free all pages in the pool and unregister the types from the global
524 void ttm_pool_fini(struct ttm_pool *pool) in ttm_pool_fini()
528 if (pool->use_dma_alloc) { in ttm_pool_fini()
531 ttm_pool_type_fini(&pool->caching[i].orders[j]); in ttm_pool_fini()
534 /* We removed the pool types from the LRU, but we need to also make sure in ttm_pool_fini()
535 * that no shrinker is concurrently freeing pages from the pool. in ttm_pool_fini()
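Teardown is symmetric; per the comment above, ttm_pool_fini() also makes sure no shrinker is still freeing pages from the pool before the caller releases it. Hypothetical driver teardown:

	static void my_device_pool_fini(struct my_device *mdev)
	{
		ttm_pool_fini(&mdev->pool);
	}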
569 spin_lock(&pt->lock); in ttm_pool_type_count()
571 list_for_each_entry(p, &pt->pages, lru) in ttm_pool_type_count()
573 spin_unlock(&pt->lock); in ttm_pool_type_count()
585 seq_printf(m, " ---%2u---", i); in ttm_pool_debugfs_header()
589 /* Dump information about the different pool types */
630 * ttm_pool_debugfs - Debugfs dump function for a pool
632 * @pool: the pool to dump the information for
635 * Make a debugfs dump with the per-pool and global information.
637 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) in ttm_pool_debugfs()
641 if (!pool->use_dma_alloc) { in ttm_pool_debugfs()
650 seq_puts(m, "DMA "); in ttm_pool_debugfs()
662 ttm_pool_debugfs_orders(pool->caching[i].orders, m); in ttm_pool_debugfs()
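Drivers commonly expose this through a seq_file show callback wired to debugfs; a hedged sketch (names hypothetical, DEFINE_SHOW_ATTRIBUTE comes from <linux/seq_file.h>):

	static int my_pool_show(struct seq_file *m, void *data)
	{
		struct my_device *mdev = m->private;	/* passed via debugfs_create_file() */

		return ttm_pool_debugfs(&mdev->pool, m);
	}
	DEFINE_SHOW_ATTRIBUTE(my_pool);

	/* during init, e.g.:
	 * debugfs_create_file("ttm_pool", 0444, debugfs_root, mdev, &my_pool_fops);
	 */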
688 * ttm_pool_mgr_init - Initialize globals
725 return register_shrinker(&mm_shrinker, "drm-ttm_pool"); in ttm_pool_mgr_init()
729 * ttm_pool_mgr_fini - Finalize globals