Lines matching "pool" in net/core/page_pool.c (cross-reference excerpt; each fragment shows its source line number and the enclosing function)
29 static int page_pool_init(struct page_pool *pool, in page_pool_init() argument
34 memcpy(&pool->p, params, sizeof(pool->p)); in page_pool_init()
37 if (pool->p.flags & ~(PP_FLAG_ALL)) in page_pool_init()
40 if (pool->p.pool_size) in page_pool_init()
41 ring_qsize = pool->p.pool_size; in page_pool_init()
51 if (pool->p.flags & PP_FLAG_DMA_MAP) { in page_pool_init()
52 if ((pool->p.dma_dir != DMA_FROM_DEVICE) && in page_pool_init()
53 (pool->p.dma_dir != DMA_BIDIRECTIONAL)) in page_pool_init()
57 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) { in page_pool_init()
61 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) in page_pool_init()
64 if (!pool->p.max_len) in page_pool_init()
67 /* pool->p.offset has to be set according to the address in page_pool_init()
73 pool->p.flags & PP_FLAG_PAGE_FRAG) in page_pool_init()
76 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) in page_pool_init()
79 atomic_set(&pool->pages_state_release_cnt, 0); in page_pool_init()
82 refcount_set(&pool->user_cnt, 1); in page_pool_init()
84 if (pool->p.flags & PP_FLAG_DMA_MAP) in page_pool_init()
85 get_device(pool->p.dev); in page_pool_init()
92 struct page_pool *pool; in page_pool_create() local
95 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid); in page_pool_create()
96 if (!pool) in page_pool_create()
99 err = page_pool_init(pool, params); in page_pool_create()
102 kfree(pool); in page_pool_create()
106 return pool; in page_pool_create()
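For orientation, a minimal sketch (not part of the listing) of the driver side of page_pool_create(): the per-queue struct and helper name are hypothetical, only the page_pool_params fields and the create call are the real API.

#include <linux/err.h>
#include <net/page_pool.h>

/* Hypothetical per-RX-queue driver state, shown only for illustration. */
struct my_rx_queue {
	struct device *dev;		/* device that will do the DMA */
	struct page_pool *page_pool;
	unsigned int ring_size;
};

static int my_rx_queue_create_pool(struct my_rx_queue *rxq)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,			/* one page per buffer */
		.pool_size	= rxq->ring_size,	/* becomes pool->p.pool_size above */
		.nid		= NUMA_NO_NODE,		/* refill follows numa_mem_id() */
		.dev		= rxq->dev,
		.dma_dir	= DMA_FROM_DEVICE,	/* must be FROM_DEVICE or BIDIRECTIONAL */
		.max_len	= PAGE_SIZE,		/* required when DMA_SYNC_DEV is set */
		.offset		= 0,			/* headroom before the DMA area */
	};
	struct page_pool *pool;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	rxq->page_pool = pool;
	return 0;
}

page_pool_create() returns an ERR_PTR when page_pool_init() rejects the parameters, which is why the sketch checks IS_ERR() rather than NULL.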
110 static void page_pool_return_page(struct page_pool *pool, struct page *page);
113 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache() argument
115 struct ptr_ring *r = &pool->ring; in page_pool_refill_alloc_cache()
127 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid; in page_pool_refill_alloc_cache()
129 /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */ in page_pool_refill_alloc_cache()
143 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
150 page_pool_return_page(pool, page); in page_pool_refill_alloc_cache()
154 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); in page_pool_refill_alloc_cache()
157 if (likely(pool->alloc.count > 0)) in page_pool_refill_alloc_cache()
158 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
165 static struct page *__page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached() argument
170 if (likely(pool->alloc.count)) { in __page_pool_get_cached()
172 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
174 page = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
180 static void page_pool_dma_sync_for_device(struct page_pool *pool, in page_pool_dma_sync_for_device() argument
186 dma_sync_size = min(dma_sync_size, pool->p.max_len); in page_pool_dma_sync_for_device()
187 dma_sync_single_range_for_device(pool->p.dev, dma_addr, in page_pool_dma_sync_for_device()
188 pool->p.offset, dma_sync_size, in page_pool_dma_sync_for_device()
189 pool->p.dma_dir); in page_pool_dma_sync_for_device()
192 static bool page_pool_dma_map(struct page_pool *pool, struct page *page) in page_pool_dma_map() argument
199 * This mapping is kept for lifetime of page, until leaving pool. in page_pool_dma_map()
201 dma = dma_map_page_attrs(pool->p.dev, page, 0, in page_pool_dma_map()
202 (PAGE_SIZE << pool->p.order), in page_pool_dma_map()
203 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC); in page_pool_dma_map()
204 if (dma_mapping_error(pool->p.dev, dma)) in page_pool_dma_map()
209 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) in page_pool_dma_map()
210 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); in page_pool_dma_map()
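Because page_pool_dma_map() keeps the mapping for the lifetime of the page, a driver using PP_FLAG_DMA_MAP never maps the page itself; it only reads back the stored address. A sketch under that assumption (the descriptor-write helper is a placeholder):

#include <net/page_pool.h>

/* my_write_rx_desc() stands in for programming the NIC descriptor. */
static void my_write_rx_desc(dma_addr_t dma, unsigned int len);

static void my_post_page(struct page_pool *pool, struct page *page,
			 unsigned int headroom, unsigned int buf_len)
{
	/* Address stored by page_pool_dma_map() above; it stays valid until
	 * the page leaves the pool via page_pool_release_page().
	 */
	dma_addr_t dma = page_pool_get_dma_addr(page);

	/* headroom would typically match the offset passed at create time. */
	my_write_rx_desc(dma + headroom, buf_len);
}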
215 static void page_pool_set_pp_info(struct page_pool *pool, in page_pool_set_pp_info() argument
218 page->pp = pool; in page_pool_set_pp_info()
228 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, in __page_pool_alloc_page_order() argument
234 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
238 if ((pool->p.flags & PP_FLAG_DMA_MAP) && in __page_pool_alloc_page_order()
239 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_page_order()
244 page_pool_set_pp_info(pool, page); in __page_pool_alloc_page_order()
247 pool->pages_state_hold_cnt++; in __page_pool_alloc_page_order()
248 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); in __page_pool_alloc_page_order()
254 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, in __page_pool_alloc_pages_slow() argument
258 unsigned int pp_flags = pool->p.flags; in __page_pool_alloc_pages_slow()
259 unsigned int pp_order = pool->p.order; in __page_pool_alloc_pages_slow()
265 return __page_pool_alloc_page_order(pool, gfp); in __page_pool_alloc_pages_slow()
268 if (unlikely(pool->alloc.count > 0)) in __page_pool_alloc_pages_slow()
269 return pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
272 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); in __page_pool_alloc_pages_slow()
274 nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache); in __page_pool_alloc_pages_slow()
282 page = pool->alloc.cache[i]; in __page_pool_alloc_pages_slow()
284 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_pages_slow()
289 page_pool_set_pp_info(pool, page); in __page_pool_alloc_pages_slow()
290 pool->alloc.cache[pool->alloc.count++] = page; in __page_pool_alloc_pages_slow()
292 pool->pages_state_hold_cnt++; in __page_pool_alloc_pages_slow()
293 trace_page_pool_state_hold(pool, page, in __page_pool_alloc_pages_slow()
294 pool->pages_state_hold_cnt); in __page_pool_alloc_pages_slow()
298 if (likely(pool->alloc.count > 0)) in __page_pool_alloc_pages_slow()
299 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
310 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages() argument
315 page = __page_pool_get_cached(pool); in page_pool_alloc_pages()
320 page = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_pages()
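page_pool_alloc_pages() is the driver-facing allocation entry point; most callers go through the page_pool_dev_alloc_pages() wrapper, which passes GFP_ATOMIC. A sketch of an RX-ring refill loop under those assumptions (the posting helper is illustrative):

#include <net/page_pool.h>

/* my_post_rx_buffer() is a stand-in for handing the page to hardware,
 * e.g. via my_post_page() in the earlier sketch.
 */
static void my_post_rx_buffer(struct page *page);

static int my_rx_refill(struct page_pool *pool, unsigned int count)
{
	while (count--) {
		/* GFP_ATOMIC wrapper around page_pool_alloc_pages(): tries
		 * the alloc cache, then the ptr_ring, then the slow path
		 * shown above.
		 */
		struct page *page = page_pool_dev_alloc_pages(pool);

		if (unlikely(!page))
			return -ENOMEM;

		my_post_rx_buffer(page);
	}

	return 0;
}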
330 static s32 page_pool_inflight(struct page_pool *pool) in page_pool_inflight() argument
332 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); in page_pool_inflight()
333 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); in page_pool_inflight()
338 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); in page_pool_inflight()
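The arithmetic elided between the counter reads and the tracepoint is just the signed difference of the two counters; a paraphrase of those elided lines, not a verbatim excerpt:

	/* Pages handed out minus pages returned; the signed 32-bit difference
	 * tolerates counter wrap-around, and a negative result indicates a
	 * bug (the real code WARNs on it).
	 */
	s32 inflight = (s32)(hold_cnt - release_cnt);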
349 void page_pool_release_page(struct page_pool *pool, struct page *page) in page_pool_release_page() argument
354 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) in page_pool_release_page()
362 /* When page is unmapped, it cannot be returned to our pool */ in page_pool_release_page()
363 dma_unmap_page_attrs(pool->p.dev, dma, in page_pool_release_page()
364 PAGE_SIZE << pool->p.order, pool->p.dma_dir, in page_pool_release_page()
370 /* This may be the last page returned, releasing the pool, so in page_pool_release_page()
371 * it is not safe to reference pool afterwards. in page_pool_release_page()
373 count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt); in page_pool_release_page()
374 trace_page_pool_state_release(pool, page, count); in page_pool_release_page()
379 static void page_pool_return_page(struct page_pool *pool, struct page *page) in page_pool_return_page() argument
381 page_pool_release_page(pool, page); in page_pool_return_page()
384 /* An optimization would be to call __free_pages(page, pool->p.order) in page_pool_return_page()
390 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) in page_pool_recycle_in_ring() argument
395 ret = ptr_ring_produce(&pool->ring, page); in page_pool_recycle_in_ring()
397 ret = ptr_ring_produce_bh(&pool->ring, page); in page_pool_recycle_in_ring()
408 struct page_pool *pool) in page_pool_recycle_in_cache() argument
410 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) in page_pool_recycle_in_cache()
414 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
420 * the configured size min(dma_sync_size, pool->max_len).
425 __page_pool_put_page(struct page_pool *pool, struct page *page, in __page_pool_put_page() argument
429 if (pool->p.flags & PP_FLAG_PAGE_FRAG && in __page_pool_put_page()
445 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) in __page_pool_put_page()
446 page_pool_dma_sync_for_device(pool, page, in __page_pool_put_page()
450 page_pool_recycle_in_cache(page, pool)) in __page_pool_put_page()
470 page_pool_release_page(pool, page); in __page_pool_put_page()
476 void page_pool_put_page(struct page_pool *pool, struct page *page, in page_pool_put_page() argument
479 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); in page_pool_put_page()
480 if (page && !page_pool_recycle_in_ring(pool, page)) { in page_pool_put_page()
482 page_pool_return_page(pool, page); in page_pool_put_page()
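On the return side a driver normally uses one of the inline wrappers around page_pool_put_page(); a sketch, assuming the caller knows whether it is running in NAPI/softirq context (the drop helper is hypothetical):

#include <net/page_pool.h>

static void my_rx_drop(struct page_pool *pool, struct page *page,
		       bool napi_context)
{
	if (napi_context)
		/* allow_direct=true: the page may go straight back into
		 * pool->alloc.cache without touching the ptr_ring.
		 */
		page_pool_recycle_direct(pool, page);
	else
		/* dma_sync_size=-1: sync the whole pool->p.max_len area. */
		page_pool_put_full_page(pool, page, false);
}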
488 void page_pool_put_page_bulk(struct page_pool *pool, void **data, in page_pool_put_page_bulk() argument
496 page = __page_pool_put_page(pool, page, -1, false); in page_pool_put_page_bulk()
506 page_pool_ring_lock(pool); in page_pool_put_page_bulk()
508 if (__ptr_ring_produce(&pool->ring, data[i])) in page_pool_put_page_bulk()
511 page_pool_ring_unlock(pool); in page_pool_put_page_bulk()
521 page_pool_return_page(pool, data[i]); in page_pool_put_page_bulk()
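page_pool_put_page_bulk() exists so a completion path can take the ring producer lock once per batch instead of once per page; a sketch that batches returns through a small array (the batch size and the source of the pages are illustrative; in-tree its main user is the xdp_frame bulk-return path):

#include <net/page_pool.h>

#define MY_BATCH	16

/* Hypothetical completion handler that frees a batch of pool pages. */
static void my_complete_batch(struct page_pool *pool,
			      struct page **pages, unsigned int n)
{
	/* Each entry goes through __page_pool_put_page(); whatever cannot be
	 * recycled into the ptr_ring is returned to the page allocator.
	 */
	page_pool_put_page_bulk(pool, (void **)pages, n);
}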
525 static struct page *page_pool_drain_frag(struct page_pool *pool, in page_pool_drain_frag() argument
528 long drain_count = BIAS_MAX - pool->frag_users; in page_pool_drain_frag()
536 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) in page_pool_drain_frag()
537 page_pool_dma_sync_for_device(pool, page, -1); in page_pool_drain_frag()
542 page_pool_return_page(pool, page); in page_pool_drain_frag()
546 static void page_pool_free_frag(struct page_pool *pool) in page_pool_free_frag() argument
548 long drain_count = BIAS_MAX - pool->frag_users; in page_pool_free_frag()
549 struct page *page = pool->frag_page; in page_pool_free_frag()
551 pool->frag_page = NULL; in page_pool_free_frag()
557 page_pool_return_page(pool, page); in page_pool_free_frag()
560 struct page *page_pool_alloc_frag(struct page_pool *pool, in page_pool_alloc_frag() argument
564 unsigned int max_size = PAGE_SIZE << pool->p.order; in page_pool_alloc_frag()
565 struct page *page = pool->frag_page; in page_pool_alloc_frag()
567 if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) || in page_pool_alloc_frag()
572 *offset = pool->frag_offset; in page_pool_alloc_frag()
575 page = page_pool_drain_frag(pool, page); in page_pool_alloc_frag()
581 page = page_pool_alloc_pages(pool, gfp); in page_pool_alloc_frag()
583 pool->frag_page = NULL; in page_pool_alloc_frag()
587 pool->frag_page = page; in page_pool_alloc_frag()
590 pool->frag_users = 1; in page_pool_alloc_frag()
592 pool->frag_offset = size; in page_pool_alloc_frag()
597 pool->frag_users++; in page_pool_alloc_frag()
598 pool->frag_offset = *offset + size; in page_pool_alloc_frag()
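With PP_FLAG_PAGE_FRAG set at create time, a driver can carve several sub-page buffers out of one pool page; a sketch using the GFP_ATOMIC wrapper (the 2048-byte buffer size is only an example):

#include <net/page_pool.h>

static struct page *my_alloc_rx_frag(struct page_pool *pool,
				     unsigned int *offset)
{
	/* Requires PP_FLAG_PAGE_FRAG in pool->p.flags and a size no larger
	 * than PAGE_SIZE << pool->p.order (see the WARN_ON above); *offset
	 * receives where in the returned page this fragment starts.
	 */
	return page_pool_dev_alloc_frag(pool, offset, 2048);
}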
603 static void page_pool_empty_ring(struct page_pool *pool) in page_pool_empty_ring() argument
608 while ((page = ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
614 page_pool_return_page(pool, page); in page_pool_empty_ring()
618 static void page_pool_free(struct page_pool *pool) in page_pool_free() argument
620 if (pool->disconnect) in page_pool_free()
621 pool->disconnect(pool); in page_pool_free()
623 ptr_ring_cleanup(&pool->ring, NULL); in page_pool_free()
625 if (pool->p.flags & PP_FLAG_DMA_MAP) in page_pool_free()
626 put_device(pool->p.dev); in page_pool_free()
628 kfree(pool); in page_pool_free()
631 static void page_pool_empty_alloc_cache_once(struct page_pool *pool) in page_pool_empty_alloc_cache_once() argument
635 if (pool->destroy_cnt) in page_pool_empty_alloc_cache_once()
642 while (pool->alloc.count) { in page_pool_empty_alloc_cache_once()
643 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
644 page_pool_return_page(pool, page); in page_pool_empty_alloc_cache_once()
648 static void page_pool_scrub(struct page_pool *pool) in page_pool_scrub() argument
650 page_pool_empty_alloc_cache_once(pool); in page_pool_scrub()
651 pool->destroy_cnt++; in page_pool_scrub()
656 page_pool_empty_ring(pool); in page_pool_scrub()
659 static int page_pool_release(struct page_pool *pool) in page_pool_release() argument
663 page_pool_scrub(pool); in page_pool_release()
664 inflight = page_pool_inflight(pool); in page_pool_release()
666 page_pool_free(pool); in page_pool_release()
674 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); in page_pool_release_retry() local
677 inflight = page_pool_release(pool); in page_pool_release_retry()
682 if (time_after_eq(jiffies, pool->defer_warn)) { in page_pool_release_retry()
683 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ; in page_pool_release_retry()
685 pr_warn("%s() stalled pool shutdown %d inflight %d sec\n", in page_pool_release_retry()
687 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; in page_pool_release_retry()
691 schedule_delayed_work(&pool->release_dw, DEFER_TIME); in page_pool_release_retry()
694 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *)) in page_pool_use_xdp_mem() argument
696 refcount_inc(&pool->user_cnt); in page_pool_use_xdp_mem()
697 pool->disconnect = disconnect; in page_pool_use_xdp_mem()
700 void page_pool_destroy(struct page_pool *pool) in page_pool_destroy() argument
702 if (!pool) in page_pool_destroy()
705 if (!page_pool_put(pool)) in page_pool_destroy()
708 page_pool_free_frag(pool); in page_pool_destroy()
710 if (!page_pool_release(pool)) in page_pool_destroy()
713 pool->defer_start = jiffies; in page_pool_destroy()
714 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; in page_pool_destroy()
716 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry); in page_pool_destroy()
717 schedule_delayed_work(&pool->release_dw, DEFER_TIME); in page_pool_destroy()
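Teardown from the driver side is a single call; a sketch assuming the pool was also registered as an XDP memory model (which is what sets pool->disconnect and takes the extra user_cnt reference via page_pool_use_xdp_mem() above):

#include <net/page_pool.h>
#include <net/xdp.h>

/* Hypothetical teardown helper; the unreg/destroy pairing mirrors what
 * drivers that registered the pool as MEM_TYPE_PAGE_POOL typically do.
 */
static void my_rx_queue_free(struct xdp_rxq_info *xdp_rxq,
			     struct page_pool *pool)
{
	if (xdp_rxq_info_is_reg(xdp_rxq))
		xdp_rxq_info_unreg(xdp_rxq);	/* drops the XDP reference */

	/* Drops the driver reference; if pages are still in flight the pool
	 * defers freeing to the release_dw delayed work shown above.
	 */
	page_pool_destroy(pool);
}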
722 void page_pool_update_nid(struct page_pool *pool, int new_nid) in page_pool_update_nid() argument
726 trace_page_pool_update_nid(pool, new_nid); in page_pool_update_nid()
727 pool->p.nid = new_nid; in page_pool_update_nid()
729 /* Flush pool alloc cache, as refill will check NUMA node */ in page_pool_update_nid()
730 while (pool->alloc.count) { in page_pool_update_nid()
731 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
732 page_pool_return_page(pool, page); in page_pool_update_nid()
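Drivers that migrate RX processing between nodes usually go through the page_pool_nid_changed() wrapper from their NAPI poll so the common case stays a single comparison; a sketch:

#include <linux/topology.h>
#include <net/page_pool.h>

/* Called from the hypothetical driver's NAPI poll before refilling. */
static void my_rx_numa_check(struct page_pool *pool)
{
	/* Only calls page_pool_update_nid() (above) when the node actually
	 * changed, so the alloc cache is flushed at most once per move.
	 */
	page_pool_nid_changed(pool, numa_mem_id());
}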
757 * The page will be returned to the pool here regardless of the in page_pool_return_skb_page()
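The fragment above is from the skb-recycling return path; the producer side of that path is the driver marking the skb before handing it to the stack. A sketch, assuming a kernel where skb_mark_for_recycle() takes only the skb (consistent with page->pp carrying the owning pool, as set in page_pool_set_pp_info() above) and where every page attached to the skb came from a page pool:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void my_deliver(struct napi_struct *napi, struct sk_buff *skb)
{
	/* Sets skb->pp_recycle so that, when the skb is freed, each
	 * pool-owned page is fed into page_pool_return_skb_page() instead
	 * of going back to the page allocator.
	 */
	skb_mark_for_recycle(skb);
	napi_gro_receive(napi, skb);
}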