Lines Matching full:page

18 #include <linux/page-flags.h>
48 * DMA_BIDIRECTIONAL is for allowing a page to be used for DMA sending, in page_pool_init()
58 /* In order to request DMA-sync-for-device the page in page_pool_init()
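For context, a minimal driver-side setup sketch of the pool these page_pool_init() checks apply to, assuming a typical XDP-capable RX path; the device pointer, pool size, and headroom below are illustrative, not taken from this file:

    struct page_pool_params pp_params = {
        .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
        .order     = 0,
        .pool_size = 256,                   /* illustrative ring size */
        .nid       = NUMA_NO_NODE,
        .dev       = netdev_dev,            /* hypothetical struct device * doing DMA */
        .dma_dir   = DMA_BIDIRECTIONAL,     /* pages used for RX and XDP_TX, per the comment above */
        .offset    = XDP_PACKET_HEADROOM,
        .max_len   = PAGE_SIZE - XDP_PACKET_HEADROOM,
    };
    struct page_pool *pool = page_pool_create(&pp_params);

    if (IS_ERR(pool))
        return PTR_ERR(pool);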
110 static void page_pool_return_page(struct page_pool *pool, struct page *page);
113 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache()
116 struct page *page; in page_pool_refill_alloc_cache() local
138 page = __ptr_ring_consume(r); in page_pool_refill_alloc_cache()
139 if (unlikely(!page)) in page_pool_refill_alloc_cache()
142 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
143 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
146 * (1) release 1 page to page-allocator and in page_pool_refill_alloc_cache()
148 * This limits stress on the page buddy allocator. in page_pool_refill_alloc_cache()
150 page_pool_return_page(pool, page); in page_pool_refill_alloc_cache()
151 page = NULL; in page_pool_refill_alloc_cache()
156 /* Return last page */ in page_pool_refill_alloc_cache()
158 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
161 return page; in page_pool_refill_alloc_cache()
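For reference, the alloc cache being refilled from the ptr_ring here is roughly the per-pool structure from include/net/page_pool.h; the sizes shown are the usual defaults and should be treated as an assumption for this sketch:

    #define PP_ALLOC_CACHE_SIZE   128
    #define PP_ALLOC_CACHE_REFILL 64

    struct pp_alloc_cache {
        u32 count;                               /* pool->alloc.count above */
        struct page *cache[PP_ALLOC_CACHE_SIZE]; /* pool->alloc.cache above */
    };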
165 static struct page *__page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached()
167 struct page *page; in __page_pool_get_cached() local
172 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
174 page = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
177 return page; in __page_pool_get_cached()
181 struct page *page, in page_pool_dma_sync_for_device() argument
184 dma_addr_t dma_addr = page_pool_get_dma_addr(page); in page_pool_dma_sync_for_device()
192 static bool page_pool_dma_map(struct page_pool *pool, struct page *page) in page_pool_dma_map() argument
196 /* Setup DMA mapping: use 'struct page' area for storing DMA-addr in page_pool_dma_map()
198 * into page private data (i.e. 32-bit CPU with 64-bit DMA caps) in page_pool_dma_map()
199 * This mapping is kept for the lifetime of the page, until it leaves the pool. in page_pool_dma_map()
201 dma = dma_map_page_attrs(pool->p.dev, page, 0, in page_pool_dma_map()
207 page_pool_set_dma_addr(page, dma); in page_pool_dma_map()
210 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); in page_pool_dma_map()
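On configurations where dma_addr_t fits into the single reused struct page field, the get/set helpers used above reduce to roughly the following simplified sketch (32-bit CPUs with 64-bit DMA instead split the address across two fields):

    static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
    {
        return page->dma_addr;
    }

    static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
    {
        page->dma_addr = addr;
    }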
216 struct page *page) in page_pool_set_pp_info() argument
218 page->pp = pool; in page_pool_set_pp_info()
219 page->pp_magic |= PP_SIGNATURE; in page_pool_set_pp_info()
222 static void page_pool_clear_pp_info(struct page *page) in page_pool_clear_pp_info() argument
224 page->pp_magic = 0; in page_pool_clear_pp_info()
225 page->pp = NULL; in page_pool_clear_pp_info()
228 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, in __page_pool_alloc_page_order()
231 struct page *page; in __page_pool_alloc_page_order() local
234 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
235 if (unlikely(!page)) in __page_pool_alloc_page_order()
239 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_page_order()
240 put_page(page); in __page_pool_alloc_page_order()
244 page_pool_set_pp_info(pool, page); in __page_pool_alloc_page_order()
248 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); in __page_pool_alloc_page_order()
249 return page; in __page_pool_alloc_page_order()
254 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, in __page_pool_alloc_pages_slow()
260 struct page *page; in __page_pool_alloc_pages_slow() local
279 * page elements have not (possibly) been DMA mapped. in __page_pool_alloc_pages_slow()
282 page = pool->alloc.cache[i]; in __page_pool_alloc_pages_slow()
284 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_pages_slow()
285 put_page(page); in __page_pool_alloc_pages_slow()
289 page_pool_set_pp_info(pool, page); in __page_pool_alloc_pages_slow()
290 pool->alloc.cache[pool->alloc.count++] = page; in __page_pool_alloc_pages_slow()
293 trace_page_pool_state_hold(pool, page, in __page_pool_alloc_pages_slow()
297 /* Return last page */ in __page_pool_alloc_pages_slow()
299 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
301 page = NULL; in __page_pool_alloc_pages_slow()
303 /* When a page has just been alloc'ed it should/must have refcnt 1. */ in __page_pool_alloc_pages_slow()
304 return page; in __page_pool_alloc_pages_slow()
310 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages()
312 struct page *page; in page_pool_alloc_pages() local
314 /* Fast-path: Get a page from cache */ in page_pool_alloc_pages()
315 page = __page_pool_get_cached(pool); in page_pool_alloc_pages()
316 if (page) in page_pool_alloc_pages()
317 return page; in page_pool_alloc_pages()
320 page = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_pages()
321 return page; in page_pool_alloc_pages()
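A hedged usage sketch of this allocation entry point from a driver RX refill helper; struct my_rx_desc and rx_headroom are hypothetical driver-side names:

    static int my_rx_refill_one(struct page_pool *pool, struct my_rx_desc *desc,
                                unsigned int rx_headroom)
    {
        struct page *page = page_pool_dev_alloc_pages(pool); /* GFP_ATOMIC wrapper */

        if (unlikely(!page))
            return -ENOMEM;

        /* DMA mapping was already done by the pool when PP_FLAG_DMA_MAP is set */
        desc->addr = page_pool_get_dma_addr(page) + rx_headroom;
        return 0;
    }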
344 /* Disconnects a page (from a page_pool). API users may need
345 * to disconnect a page (from a page_pool) to allow it to be used as
346 * a regular page (that will eventually be returned to the normal
347 * page-allocator via put_page).
349 void page_pool_release_page(struct page_pool *pool, struct page *page) in page_pool_release_page() argument
360 dma = page_pool_get_dma_addr(page); in page_pool_release_page()
362 /* When page is unmapped, it cannot be returned to our pool */ in page_pool_release_page()
366 page_pool_set_dma_addr(page, 0); in page_pool_release_page()
368 page_pool_clear_pp_info(page); in page_pool_release_page()
370 /* This may be the last page returned, releasing the pool, so in page_pool_release_page()
374 trace_page_pool_state_release(pool, page, count); in page_pool_release_page()
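A sketch of the non-recycling driver pattern this API exists for: unmap and disconnect the page before handing the buffer to the stack as an ordinary skb (everything except the page_pool and skb calls is hypothetical):

    skb = build_skb(page_address(page), PAGE_SIZE);
    if (likely(skb)) {
        /* page leaves the pool; the skb free path will put_page() it normally */
        page_pool_release_page(pool, page);
        skb_reserve(skb, rx_headroom);
        skb_put(skb, pkt_len);
        napi_gro_receive(napi, skb);
    }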
378 /* Return a page to the page allocator, cleaning up our state */
379 static void page_pool_return_page(struct page_pool *pool, struct page *page) in page_pool_return_page() argument
381 page_pool_release_page(pool, page); in page_pool_return_page()
383 put_page(page); in page_pool_return_page()
384 /* An optimization would be to call __free_pages(page, pool->p.order) in page_pool_return_page()
385 * knowing the page is not part of the page-cache (thus avoiding a in page_pool_return_page()
390 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) in page_pool_recycle_in_ring() argument
395 ret = ptr_ring_produce(&pool->ring, page); in page_pool_recycle_in_ring()
397 ret = ptr_ring_produce_bh(&pool->ring, page); in page_pool_recycle_in_ring()
407 static bool page_pool_recycle_in_cache(struct page *page, in page_pool_recycle_in_cache() argument
413 /* Caller MUST have verified/know (page_ref_count(page) == 1) */ in page_pool_recycle_in_cache()
414 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
418 /* If the page refcnt == 1, this will try to recycle the page.
421 * If the page refcnt != 1, then the page will be returned to memory
424 static __always_inline struct page *
425 __page_pool_put_page(struct page_pool *pool, struct page *page, in __page_pool_put_page() argument
428 /* It is not the last user for the page frag case */ in __page_pool_put_page()
430 page_pool_atomic_sub_frag_count_return(page, 1)) in __page_pool_put_page()
434 * one-frame-per-page, but have fallbacks that act like the in __page_pool_put_page()
435 * regular page allocator APIs. in __page_pool_put_page()
437 * refcnt == 1 means the page_pool owns the page, and can recycle it. in __page_pool_put_page()
439 * a page is NOT reusable when it was allocated while the system is under in __page_pool_put_page()
442 if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) { in __page_pool_put_page()
446 page_pool_dma_sync_for_device(pool, page, in __page_pool_put_page()
450 page_pool_recycle_in_cache(page, pool)) in __page_pool_put_page()
453 /* Page found as candidate for recycling */ in __page_pool_put_page()
454 return page; in __page_pool_put_page()
458 * Many drivers split up the page into fragments, and some in __page_pool_put_page()
470 page_pool_release_page(pool, page); in __page_pool_put_page()
471 put_page(page); in __page_pool_put_page()
476 void page_pool_put_page(struct page_pool *pool, struct page *page, in page_pool_put_page() argument
479 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); in page_pool_put_page()
480 if (page && !page_pool_recycle_in_ring(pool, page)) { in page_pool_put_page()
482 page_pool_return_page(pool, page); in page_pool_put_page()
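Driver-side return paths, sketched with the usual wrappers: recycle into the lockless per-CPU cache only from the NAPI/softirq context that owns the pool, otherwise go through the ptr_ring:

    /* In the pool-owning NAPI poll routine (allow_direct = true): */
    page_pool_recycle_direct(pool, page);

    /* From any other context: full-length sync, no direct-cache fast path */
    page_pool_put_full_page(pool, page, false);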
494 struct page *page = virt_to_head_page(data[i]); in page_pool_put_page_bulk() local
496 page = __page_pool_put_page(pool, page, -1, false); in page_pool_put_page_bulk()
498 if (page) in page_pool_put_page_bulk()
499 data[bulk_len++] = page; in page_pool_put_page_bulk()
525 static struct page *page_pool_drain_frag(struct page_pool *pool, in page_pool_drain_frag() argument
526 struct page *page) in page_pool_drain_frag()
530 /* Some user is still using the page frag */ in page_pool_drain_frag()
531 if (likely(page_pool_atomic_sub_frag_count_return(page, in page_pool_drain_frag()
535 if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) { in page_pool_drain_frag()
537 page_pool_dma_sync_for_device(pool, page, -1); in page_pool_drain_frag()
539 return page; in page_pool_drain_frag()
542 page_pool_return_page(pool, page); in page_pool_drain_frag()
549 struct page *page = pool->frag_page; in page_pool_free_frag() local
553 if (!page || in page_pool_free_frag()
554 page_pool_atomic_sub_frag_count_return(page, drain_count)) in page_pool_free_frag()
557 page_pool_return_page(pool, page); in page_pool_free_frag()
560 struct page *page_pool_alloc_frag(struct page_pool *pool, in page_pool_alloc_frag()
565 struct page *page = pool->frag_page; in page_pool_alloc_frag() local
574 if (page && *offset + size > max_size) { in page_pool_alloc_frag()
575 page = page_pool_drain_frag(pool, page); in page_pool_alloc_frag()
576 if (page) in page_pool_alloc_frag()
580 if (!page) { in page_pool_alloc_frag()
581 page = page_pool_alloc_pages(pool, gfp); in page_pool_alloc_frag()
582 if (unlikely(!page)) { in page_pool_alloc_frag()
587 pool->frag_page = page; in page_pool_alloc_frag()
593 page_pool_set_frag_count(page, BIAS_MAX); in page_pool_alloc_frag()
594 return page; in page_pool_alloc_frag()
599 return page; in page_pool_alloc_frag()
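A usage sketch of the frag API, assuming the pool was created with PP_FLAG_PAGE_FRAG; rx_buf_len is a hypothetical per-packet buffer size:

    unsigned int offset;
    struct page *page = page_pool_dev_alloc_frag(pool, &offset, rx_buf_len);

    if (unlikely(!page))
        return -ENOMEM;

    /* each caller gets its own offset within the shared page */
    dma_addr_t dma = page_pool_get_dma_addr(page) + offset;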
605 struct page *page; in page_pool_empty_ring() local
608 while ((page = ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
610 if (!(page_ref_count(page) == 1)) in page_pool_empty_ring()
612 __func__, page_ref_count(page)); in page_pool_empty_ring()
614 page_pool_return_page(pool, page); in page_pool_empty_ring()
633 struct page *page; in page_pool_empty_alloc_cache_once() local
643 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
644 page_pool_return_page(pool, page); in page_pool_empty_alloc_cache_once()
724 struct page *page; in page_pool_update_nid() local
731 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
732 page_pool_return_page(pool, page); in page_pool_update_nid()
737 bool page_pool_return_skb_page(struct page *page) in page_pool_return_skb_page() argument
741 page = compound_head(page); in page_pool_return_skb_page()
743 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation in page_pool_return_skb_page()
745 * head page of compound page and bit 1 for pfmemalloc page, so in page_pool_return_skb_page()
748 * to avoid recycling the pfmemalloc page. in page_pool_return_skb_page()
750 if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE)) in page_pool_return_skb_page()
753 pp = page->pp; in page_pool_return_skb_page()
756 * This will *not* work for a NIC using a split-page memory model. in page_pool_return_skb_page()
757 * The page will be returned to the pool here regardless of the in page_pool_return_skb_page()
760 page_pool_put_full_page(pp, page, false); in page_pool_return_skb_page()
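This return path is reached when an skb built on pool pages is freed; the driver opts in by marking the skb, roughly as below (assuming the one-argument skb_mark_for_recycle() of this kernel generation; headroom and pkt_len are hypothetical):

    skb = build_skb(page_address(page), PAGE_SIZE);
    if (skb) {
        skb_mark_for_recycle(skb);      /* sets skb->pp_recycle */
        skb_reserve(skb, headroom);
        skb_put(skb, pkt_len);
        napi_gro_receive(napi, skb);
    }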