Lines matching refs: pool (symbol references in the kernel's DMA pool allocator, mm/dmapool.c)
74 struct dma_pool *pool; in show_pools() local
84 list_for_each_entry(pool, &dev->dma_pools, pools) { in show_pools()
88 spin_lock_irq(&pool->lock); in show_pools()
89 list_for_each_entry(page, &pool->page_list, page_list) { in show_pools()
93 spin_unlock_irq(&pool->lock); in show_pools()
97 pool->name, blocks, in show_pools()
98 pages * (pool->allocation / pool->size), in show_pools()
99 pool->size, pages); in show_pools()
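
The show_pools() hits above come from the per-device "pools" sysfs attribute: under pool->lock it walks page_list and prints, per pool, the name, blocks in use, total capacity (pages * (pool->allocation / pool->size)), block size and page count. A minimal sketch of that accounting walk, assuming struct dma_page carries an in_use counter of handed-out blocks (a field not visible in this listing) alongside the fields used above:

        /* Sketch only: count allocated blocks and backing pages for one pool. */
        static void count_pool(struct dma_pool *pool,
                               unsigned int *blocks, unsigned int *pages)
        {
                struct dma_page *page;

                *blocks = 0;
                *pages = 0;

                /* same lock the listing shows around the page_list walk */
                spin_lock_irq(&pool->lock);
                list_for_each_entry(page, &pool->page_list, page_list) {
                        *pages += 1;
                        *blocks += page->in_use;        /* assumed per-page counter */
                }
                spin_unlock_irq(&pool->lock);
        }
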
206 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) in pool_initialise_page() argument
209 unsigned int next_boundary = pool->boundary; in pool_initialise_page()
212 unsigned int next = offset + pool->size; in pool_initialise_page()
213 if (unlikely((next + pool->size) >= next_boundary)) { in pool_initialise_page()
215 next_boundary += pool->boundary; in pool_initialise_page()
219 } while (offset < pool->allocation); in pool_initialise_page()
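
pool_initialise_page() threads an in-page free list through the new page: the first sizeof(int) bytes of every free block store the offset of the next free block, and a block is never allowed to straddle a pool->boundary line. The same loop, restated as a standalone sketch over plain parameters:

        /* Sketch: stitch the intra-page free list, one link per free block. */
        static void init_block_free_list(void *vaddr, unsigned int size,
                                         unsigned int allocation, unsigned int boundary)
        {
                unsigned int offset = 0;
                unsigned int next_boundary = boundary;

                do {
                        unsigned int next = offset + size;

                        /* if the following block would cross a boundary line,
                         * place it at the boundary instead */
                        if (next + size >= next_boundary) {
                                next = next_boundary;
                                next_boundary += boundary;
                        }
                        *(int *)(vaddr + offset) = next;        /* link to next free block */
                        offset = next;
                } while (offset < allocation);
        }
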
222 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) in pool_alloc_page() argument
229 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
233 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); in pool_alloc_page()
235 pool_initialise_page(pool, page); in pool_alloc_page()
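
pool_alloc_page() grows the pool by one chunk of pool->allocation bytes: allocate the bookkeeping struct, get coherent DMA memory from pool->dev, optionally poison it for debugging, then initialise the free list. A sketch with one plausible shape of the error handling (struct dma_page field names assumed from their usages elsewhere in this listing):

        static struct dma_page *grow_pool(struct dma_pool *pool, gfp_t mem_flags)
        {
                struct dma_page *page;

                page = kmalloc(sizeof(*page), mem_flags);
                if (!page)
                        return NULL;

                page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                                 &page->dma, mem_flags);
                if (!page->vaddr) {
                        kfree(page);
                        return NULL;
                }

                pool_initialise_page(pool, page);       /* stitch the in-page free list */
                page->offset = 0;                       /* first free block */
                return page;
        }
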
250 static void pool_free_page(struct dma_pool *pool, struct dma_page *page) in pool_free_page() argument
255 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); in pool_free_page()
257 dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); in pool_free_page()
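
pool_free_page() is the mirror image: re-poison when debugging, hand the coherent memory back, drop the bookkeeping entry. Roughly:

        /* Sketch of the teardown path mirroring grow_pool() above. */
        static void shrink_pool(struct dma_pool *pool, struct dma_page *page)
        {
                dma_free_coherent(pool->dev, pool->allocation, page->vaddr, page->dma);
                list_del(&page->page_list);
                kfree(page);
        }
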
270 void dma_pool_destroy(struct dma_pool *pool) in dma_pool_destroy() argument
274 if (unlikely(!pool)) in dma_pool_destroy()
279 list_del(&pool->pools); in dma_pool_destroy()
280 if (pool->dev && list_empty(&pool->dev->dma_pools)) in dma_pool_destroy()
284 device_remove_file(pool->dev, &dev_attr_pools); in dma_pool_destroy()
287 while (!list_empty(&pool->page_list)) { in dma_pool_destroy()
289 page = list_entry(pool->page_list.next, in dma_pool_destroy()
292 if (pool->dev) in dma_pool_destroy()
293 dev_err(pool->dev, in dma_pool_destroy()
295 pool->name, page->vaddr); in dma_pool_destroy()
298 pool->name, page->vaddr); in dma_pool_destroy()
303 pool_free_page(pool, page); in dma_pool_destroy()
306 kfree(pool); in dma_pool_destroy()
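
dma_pool_destroy() takes the pool off the device's dma_pools list (removing the sysfs attribute when it was the device's last pool) and then drains page_list. The dev_err lines fire when a page still has live blocks; in that case the coherent memory is deliberately leaked rather than freed while a device might still be using it. A sketch of the drain, with a hypothetical page_is_busy() standing in for the real liveness test:

        static void drain_pool(struct dma_pool *pool)
        {
                while (!list_empty(&pool->page_list)) {
                        struct dma_page *page;

                        page = list_first_entry(&pool->page_list,
                                                struct dma_page, page_list);
                        if (page_is_busy(page)) {       /* hypothetical helper */
                                /* the listing shows a pr_err fallback when pool->dev is NULL */
                                dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
                                        pool->name, page->vaddr);
                                /* leak the coherent memory, free only the bookkeeping */
                                list_del(&page->page_list);
                                kfree(page);
                        } else {
                                pool_free_page(pool, page);
                        }
                }
                kfree(pool);
        }
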
320 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, in dma_pool_alloc() argument
330 spin_lock_irqsave(&pool->lock, flags); in dma_pool_alloc()
331 list_for_each_entry(page, &pool->page_list, page_list) { in dma_pool_alloc()
332 if (page->offset < pool->allocation) in dma_pool_alloc()
337 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_alloc()
339 page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO)); in dma_pool_alloc()
343 spin_lock_irqsave(&pool->lock, flags); in dma_pool_alloc()
345 list_add(&page->page_list, &pool->page_list); in dma_pool_alloc()
357 for (i = sizeof(page->offset); i < pool->size; i++) { in dma_pool_alloc()
360 if (pool->dev) in dma_pool_alloc()
361 dev_err(pool->dev, in dma_pool_alloc()
363 pool->name, retval); in dma_pool_alloc()
366 pool->name, retval); in dma_pool_alloc()
373 data, pool->size, 1); in dma_pool_alloc()
378 memset(retval, POOL_POISON_ALLOCATED, pool->size); in dma_pool_alloc()
380 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_alloc()
383 memset(retval, 0, pool->size); in dma_pool_alloc()
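
For context, the caller-side pattern behind dma_pool_alloc(): a driver creates one pool per fixed-size DMA object and pulls blocks from it as needed. The pool name, block size/alignment and the struct my_dev container below are hypothetical, chosen only to illustrate the documented dma_pool_create()/dma_pool_alloc() signatures:

        #include <linux/device.h>
        #include <linux/dma-mapping.h>
        #include <linux/dmapool.h>

        struct my_dev {                         /* hypothetical driver state */
                struct dma_pool *desc_pool;
                void            *desc;
                dma_addr_t       desc_dma;
        };

        static int my_setup_pool(struct device *dev, struct my_dev *md)
        {
                /* 64-byte blocks, 16-byte aligned, no boundary restriction */
                md->desc_pool = dma_pool_create("my-descs", dev, 64, 16, 0);
                if (!md->desc_pool)
                        return -ENOMEM;

                /* returns a CPU pointer and fills in the bus address;
                 * __GFP_ZERO takes the memset(retval, 0, pool->size) path above */
                md->desc = dma_pool_alloc(md->desc_pool, GFP_KERNEL | __GFP_ZERO,
                                          &md->desc_dma);
                if (!md->desc) {
                        dma_pool_destroy(md->desc_pool);
                        return -ENOMEM;
                }
                return 0;
        }
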
389 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) in pool_find_page() argument
393 list_for_each_entry(page, &pool->page_list, page_list) { in pool_find_page()
396 if ((dma - page->dma) < pool->allocation) in pool_find_page()
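
pool_find_page() maps a bus address back to its backing page with a simple containment test, effectively asking whether page->dma <= dma < page->dma + pool->allocation. As a small helper sketch:

        /* true when 'dma' falls inside the coherent buffer backing 'page' */
        static bool dma_in_page(struct dma_pool *pool, struct dma_page *page,
                                dma_addr_t dma)
        {
                return dma >= page->dma && (dma - page->dma) < pool->allocation;
        }
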
411 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) in dma_pool_free() argument
417 spin_lock_irqsave(&pool->lock, flags); in dma_pool_free()
418 page = pool_find_page(pool, dma); in dma_pool_free()
420 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_free()
421 if (pool->dev) in dma_pool_free()
422 dev_err(pool->dev, in dma_pool_free()
424 pool->name, vaddr, (unsigned long)dma); in dma_pool_free()
427 pool->name, vaddr, (unsigned long)dma); in dma_pool_free()
434 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_free()
435 if (pool->dev) in dma_pool_free()
436 dev_err(pool->dev, in dma_pool_free()
438 pool->name, vaddr, &dma); in dma_pool_free()
441 pool->name, vaddr, &dma); in dma_pool_free()
446 while (chain < pool->allocation) { in dma_pool_free()
451 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_free()
452 if (pool->dev) in dma_pool_free()
453 dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n", in dma_pool_free()
454 pool->name, &dma); in dma_pool_free()
457 pool->name, &dma); in dma_pool_free()
461 memset(vaddr, POOL_POISON_FREED, pool->size); in dma_pool_free()
472 spin_unlock_irqrestore(&pool->lock, flags); in dma_pool_free()
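
Caller side of dma_pool_free(): a block must be returned with exactly the (vaddr, dma) pair handed out by dma_pool_alloc(), to the same pool; the dev_err branches above cover a dma address that does not belong to the pool, a vaddr that does not match it, and (with debugging enabled) a double free. Continuing the hypothetical struct my_dev example from the dma_pool_alloc() sketch:

        static void my_teardown_pool(struct my_dev *md)
        {
                /* every block must come back before the pool is destroyed,
                 * or dma_pool_destroy() will complain about busy pages */
                dma_pool_free(md->desc_pool, md->desc, md->desc_dma);
                dma_pool_destroy(md->desc_pool);
                md->desc_pool = NULL;           /* dma_pool_destroy(NULL) is a no-op */
        }
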
481 struct dma_pool *pool = *(struct dma_pool **)res; in dmam_pool_release() local
483 dma_pool_destroy(pool); in dmam_pool_release()
505 struct dma_pool **ptr, *pool; in dmam_pool_create() local
511 pool = *ptr = dma_pool_create(name, dev, size, align, allocation); in dmam_pool_create()
512 if (pool) in dmam_pool_create()
517 return pool; in dmam_pool_create()
527 void dmam_pool_destroy(struct dma_pool *pool) in dmam_pool_destroy() argument
529 struct device *dev = pool->dev; in dmam_pool_destroy()
531 WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool)); in dmam_pool_destroy()
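
dmam_pool_release(), dmam_pool_create() and dmam_pool_destroy() are the managed (devres) layer: the pool is tied to the device's resource list, so it is destroyed automatically when the driver detaches, and dmam_pool_destroy() exists only for the unusual case of tearing a pool down early (the WARN_ON trips if no matching devres entry was registered). A hypothetical caller, reusing struct my_dev from the sketch above:

        static int my_probe(struct device *dev, struct my_dev *md)
        {
                /* destroyed for us on driver detach; no explicit cleanup needed */
                md->desc_pool = dmam_pool_create("my-descs", dev, 64, 16, 0);
                if (!md->desc_pool)
                        return -ENOMEM;
                return 0;
        }
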