Lines Matching full:page
18 #include <linux/page-flags.h>
155 * DMA_BIDIRECTIONAL is for allowing a page to be used for DMA sending, in page_pool_init()
165 /* In order to request DMA-sync-for-device the page in page_pool_init()
223 static void page_pool_return_page(struct page_pool *pool, struct page *page);
226 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache()
229 struct page *page; in page_pool_refill_alloc_cache() local
250 page = __ptr_ring_consume(r); in page_pool_refill_alloc_cache()
251 if (unlikely(!page)) in page_pool_refill_alloc_cache()
254 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
255 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
258 * (1) release 1 page to page-allocator and in page_pool_refill_alloc_cache()
260 * This limits stress on the page buddy allocator. in page_pool_refill_alloc_cache()
262 page_pool_return_page(pool, page); in page_pool_refill_alloc_cache()
264 page = NULL; in page_pool_refill_alloc_cache()
269 /* Return last page */ in page_pool_refill_alloc_cache()
271 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
275 return page; in page_pool_refill_alloc_cache()
279 static struct page *__page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached()
281 struct page *page; in __page_pool_get_cached() local
286 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
289 page = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
292 return page; in __page_pool_get_cached()
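
The two fragments above (page_pool_refill_alloc_cache() and __page_pool_get_cached()) show the pool's two-level allocation cache: a small, lockless per-pool array (pool->alloc.cache) that is consumed first, and a ptr_ring that refills it while dropping pages that landed on the wrong NUMA node. Below is a minimal user-space sketch of the same idea; all names here are invented for illustration and this is not the kernel code (the NUMA check is elided):

#include <stddef.h>

#define CACHE_SIZE	128
#define RING_SIZE	1024

struct toy_pool {
	void	*cache[CACHE_SIZE];	/* lockless, single-consumer cache */
	int	count;
	void	*ring[RING_SIZE];	/* stand-in for the locked ptr_ring */
	unsigned int ring_head, ring_tail;
};

static void *toy_ring_consume(struct toy_pool *p)
{
	void *obj;

	if (p->ring_head == p->ring_tail)
		return NULL;			/* ring empty */
	obj = p->ring[p->ring_head % RING_SIZE];
	p->ring_head++;
	return obj;
}

/* Refill the array cache from the ring, then hand back the last entry. */
static void *toy_refill(struct toy_pool *p)
{
	void *obj = NULL;

	while (p->count < CACHE_SIZE) {
		obj = toy_ring_consume(p);
		if (!obj)
			break;
		p->cache[p->count++] = obj;
	}

	return p->count ? p->cache[--p->count] : NULL;
}

/* Fast path: pop from the array cache; fall back to the ring refill. */
static void *toy_get(struct toy_pool *p)
{
	if (p->count)
		return p->cache[--p->count];
	return toy_refill(p);
}
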
296 struct page *page, in page_pool_dma_sync_for_device() argument
299 dma_addr_t dma_addr = page_pool_get_dma_addr(page); in page_pool_dma_sync_for_device()
307 static bool page_pool_dma_map(struct page_pool *pool, struct page *page) in page_pool_dma_map() argument
311 /* Setup DMA mapping: use 'struct page' area for storing DMA-addr in page_pool_dma_map()
313 * into page private data (i.e. 32-bit CPU with 64-bit DMA caps) in page_pool_dma_map()
314 * This mapping is kept for the lifetime of the page, until it leaves the pool. in page_pool_dma_map()
316 dma = dma_map_page_attrs(pool->p.dev, page, 0, in page_pool_dma_map()
322 page_pool_set_dma_addr(page, dma); in page_pool_dma_map()
325 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); in page_pool_dma_map()
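
page_pool_dma_map() above maps each page once, for its whole lifetime in the pool, stores the dma_addr in the struct page, and (when DMA-sync-for-device was requested) syncs only the region the device may write. The following is a hedged, kernel-style sketch of that pattern: my_pool and its fields are invented for illustration, and only the generic DMA-API calls (dma_map_page_attrs(), dma_mapping_error(), dma_sync_single_range_for_device()) are real interfaces.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct my_pool {
	struct device		*dev;
	enum dma_data_direction	dma_dir;
	unsigned int		order;
	unsigned int		max_len;	/* largest area the device may touch */
	unsigned int		offset;		/* headroom before that area */
};

static bool my_pool_map_page(struct my_pool *pool, struct page *page,
			     dma_addr_t *dma_out)
{
	dma_addr_t dma;

	/* Map the whole (possibly compound) page once, for its lifetime in
	 * the pool; DMA_ATTR_SKIP_CPU_SYNC because syncs are issued explicitly.
	 */
	dma = dma_map_page_attrs(pool->dev, page, 0, PAGE_SIZE << pool->order,
				 pool->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->dev, dma))
		return false;

	/* Make only the region the device will actually write visible to it. */
	dma_sync_single_range_for_device(pool->dev, dma, pool->offset,
					 pool->max_len, pool->dma_dir);
	*dma_out = dma;
	return true;
}
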
331 struct page *page) in page_pool_set_pp_info() argument
333 page->pp = pool; in page_pool_set_pp_info()
334 page->pp_magic |= PP_SIGNATURE; in page_pool_set_pp_info()
336 pool->p.init_callback(page, pool->p.init_arg); in page_pool_set_pp_info()
339 static void page_pool_clear_pp_info(struct page *page) in page_pool_clear_pp_info() argument
341 page->pp_magic = 0; in page_pool_clear_pp_info()
342 page->pp = NULL; in page_pool_clear_pp_info()
345 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, in __page_pool_alloc_page_order()
348 struct page *page; in __page_pool_alloc_page_order() local
351 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
352 if (unlikely(!page)) in __page_pool_alloc_page_order()
356 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_page_order()
357 put_page(page); in __page_pool_alloc_page_order()
362 page_pool_set_pp_info(pool, page); in __page_pool_alloc_page_order()
366 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); in __page_pool_alloc_page_order()
367 return page; in __page_pool_alloc_page_order()
372 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, in __page_pool_alloc_pages_slow()
378 struct page *page; in __page_pool_alloc_pages_slow() local
398 * page elements have not been (possibly) DMA mapped. in __page_pool_alloc_pages_slow()
401 page = pool->alloc.cache[i]; in __page_pool_alloc_pages_slow()
403 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_pages_slow()
404 put_page(page); in __page_pool_alloc_pages_slow()
408 page_pool_set_pp_info(pool, page); in __page_pool_alloc_pages_slow()
409 pool->alloc.cache[pool->alloc.count++] = page; in __page_pool_alloc_pages_slow()
412 trace_page_pool_state_hold(pool, page, in __page_pool_alloc_pages_slow()
416 /* Return last page */ in __page_pool_alloc_pages_slow()
418 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
421 page = NULL; in __page_pool_alloc_pages_slow()
424 /* A page just alloc'ed should/must have refcnt 1. */ in __page_pool_alloc_pages_slow()
425 return page; in __page_pool_alloc_pages_slow()
431 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages()
433 struct page *page; in page_pool_alloc_pages() local
435 /* Fast-path: Get a page from cache */ in page_pool_alloc_pages()
436 page = __page_pool_get_cached(pool); in page_pool_alloc_pages()
437 if (page) in page_pool_alloc_pages()
438 return page; in page_pool_alloc_pages()
441 page = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_pages()
442 return page; in page_pool_alloc_pages()
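
page_pool_alloc_pages() above is the driver-facing entry point: try the cache, then fall back to the slow path. Below is a hedged sketch of typical driver usage; the ring size, the rx_buf structure, the helper names and the GFP flags are illustrative, while the page_pool calls themselves are the documented API:

#include <net/page_pool.h>

struct rx_buf {
	struct page	*page;
	dma_addr_t	dma;
};

static struct page_pool *rx_create_pool(struct device *dev)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 1024,			/* roughly the RX ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,		/* sync the whole page */
		.offset		= 0,
	};

	return page_pool_create(&pp);			/* ERR_PTR() on failure */
}

static int rx_refill_one(struct page_pool *pool, struct rx_buf *buf)
{
	/* Fast path hits pool->alloc.cache; otherwise the slow path above
	 * asks the page allocator and does the DMA mapping.
	 */
	buf->page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!buf->page))
		return -ENOMEM;

	/* Stored by the pool at map time because PP_FLAG_DMA_MAP is set. */
	buf->dma = page_pool_get_dma_addr(buf->page);
	return 0;
}
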
465 /* Disconnects a page (from a page_pool). API users can have a need
466 * to disconnect a page (from a page_pool), to allow it to be used as
467 * a regular page (that will eventually be returned to the normal
468 * page-allocator via put_page).
470 void page_pool_release_page(struct page_pool *pool, struct page *page) in page_pool_release_page() argument
481 dma = page_pool_get_dma_addr(page); in page_pool_release_page()
483 /* When page is unmapped, it cannot be returned to our pool */ in page_pool_release_page()
487 page_pool_set_dma_addr(page, 0); in page_pool_release_page()
489 page_pool_clear_pp_info(page); in page_pool_release_page()
491 /* This may be the last page returned, releasing the pool, so in page_pool_release_page()
495 trace_page_pool_state_release(pool, page, count); in page_pool_release_page()
499 /* Return a page to the page allocator, cleaning up our state */
500 static void page_pool_return_page(struct page_pool *pool, struct page *page) in page_pool_return_page() argument
502 page_pool_release_page(pool, page); in page_pool_return_page()
504 put_page(page); in page_pool_return_page()
505 /* An optimization would be to call __free_pages(page, pool->p.order) in page_pool_return_page()
506 * knowing page is not part of page-cache (thus avoiding a in page_pool_return_page()
511 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) in page_pool_recycle_in_ring() argument
516 ret = ptr_ring_produce(&pool->ring, page); in page_pool_recycle_in_ring()
518 ret = ptr_ring_produce_bh(&pool->ring, page); in page_pool_recycle_in_ring()
533 static bool page_pool_recycle_in_cache(struct page *page, in page_pool_recycle_in_cache() argument
541 /* Caller MUST have verified/know (page_ref_count(page) == 1) */ in page_pool_recycle_in_cache()
542 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
547 /* If the page refcnt == 1, this will try to recycle the page.
550 * If the page refcnt != 1, then the page will be returned to memory
553 static __always_inline struct page *
554 __page_pool_put_page(struct page_pool *pool, struct page *page, in __page_pool_put_page() argument
558 * one-frame-per-page, but have fallbacks that act like the in __page_pool_put_page()
559 * regular page allocator APIs. in __page_pool_put_page()
561 * refcnt == 1 means page_pool owns page, and can recycle it. in __page_pool_put_page()
563 * page is NOT reusable when allocated while the system is under in __page_pool_put_page()
566 if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) { in __page_pool_put_page()
570 page_pool_dma_sync_for_device(pool, page, in __page_pool_put_page()
574 page_pool_recycle_in_cache(page, pool)) in __page_pool_put_page()
577 /* Page found as a candidate for recycling */ in __page_pool_put_page()
578 return page; in __page_pool_put_page()
582 * Many drivers split up the page into fragments, and some in __page_pool_put_page()
595 page_pool_release_page(pool, page); in __page_pool_put_page()
596 put_page(page); in __page_pool_put_page()
601 void page_pool_put_defragged_page(struct page_pool *pool, struct page *page, in page_pool_put_defragged_page() argument
604 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); in page_pool_put_defragged_page()
605 if (page && !page_pool_recycle_in_ring(pool, page)) { in page_pool_put_defragged_page()
608 page_pool_return_page(pool, page); in page_pool_put_defragged_page()
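
page_pool_put_defragged_page() above is the core of the return path: __page_pool_put_page() recycles only when the pool is the sole owner (page_ref_count() == 1) and the page is not pfmemalloc; a recycled page goes to the lockless cache when allow_direct is set, otherwise to the ptr_ring, and anything else is unmapped and freed. A hedged sketch of the usual driver-side call follows; the function name and arguments are invented, the page_pool call is real:

#include <net/page_pool.h>

static void rx_complete_one(struct page_pool *pool, struct page *page,
			    bool in_napi)
{
	/* allow_direct == true is only safe from the pool's own NAPI/softirq
	 * context and feeds the lockless alloc.cache; otherwise the page goes
	 * through the ptr_ring.  Pages with extra references or pfmemalloc
	 * pages fall through to unmap + put_page() inside the pool.
	 */
	page_pool_put_full_page(pool, page, in_napi);
}
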
620 struct page *page = virt_to_head_page(data[i]); in page_pool_put_page_bulk() local
622 /* This is not the last user of the page in the frag case */ in page_pool_put_page_bulk()
623 if (!page_pool_is_last_frag(pool, page)) in page_pool_put_page_bulk()
626 page = __page_pool_put_page(pool, page, -1, false); in page_pool_put_page_bulk()
628 if (page) in page_pool_put_page_bulk()
629 data[bulk_len++] = page; in page_pool_put_page_bulk()
659 static struct page *page_pool_drain_frag(struct page_pool *pool, in page_pool_drain_frag() argument
660 struct page *page) in page_pool_drain_frag()
664 /* Some user is still using the page frag */ in page_pool_drain_frag()
665 if (likely(page_pool_defrag_page(page, drain_count))) in page_pool_drain_frag()
668 if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) { in page_pool_drain_frag()
670 page_pool_dma_sync_for_device(pool, page, -1); in page_pool_drain_frag()
672 return page; in page_pool_drain_frag()
675 page_pool_return_page(pool, page); in page_pool_drain_frag()
682 struct page *page = pool->frag_page; in page_pool_free_frag() local
686 if (!page || page_pool_defrag_page(page, drain_count)) in page_pool_free_frag()
689 page_pool_return_page(pool, page); in page_pool_free_frag()
692 struct page *page_pool_alloc_frag(struct page_pool *pool, in page_pool_alloc_frag()
697 struct page *page = pool->frag_page; in page_pool_alloc_frag() local
706 if (page && *offset + size > max_size) { in page_pool_alloc_frag()
707 page = page_pool_drain_frag(pool, page); in page_pool_alloc_frag()
708 if (page) { in page_pool_alloc_frag()
714 if (!page) { in page_pool_alloc_frag()
715 page = page_pool_alloc_pages(pool, gfp); in page_pool_alloc_frag()
716 if (unlikely(!page)) { in page_pool_alloc_frag()
721 pool->frag_page = page; in page_pool_alloc_frag()
727 page_pool_fragment_page(page, BIAS_MAX); in page_pool_alloc_frag()
728 return page; in page_pool_alloc_frag()
734 return page; in page_pool_alloc_frag()
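
page_pool_alloc_frag() above carves several buffers out of a single pool page, tracking outstanding users with the frag bias set by page_pool_fragment_page() and dropped by page_pool_defrag_page(); it requires PP_FLAG_PAGE_FRAG on the pool. A hedged usage sketch, with the buffer size and helper name invented:

#include <net/page_pool.h>

#define RX_FRAG_SIZE	2048	/* illustrative per-buffer size */

static struct page *rx_alloc_frag(struct page_pool *pool,
				  unsigned int *offset, dma_addr_t *dma)
{
	struct page *page;

	page = page_pool_alloc_frag(pool, offset, RX_FRAG_SIZE, GFP_ATOMIC);
	if (unlikely(!page))
		return NULL;

	/* DMA address of this fragment = mapped page address + frag offset. */
	*dma = page_pool_get_dma_addr(page) + *offset;
	return page;
}
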
740 struct page *page; in page_pool_empty_ring() local
743 while ((page = ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
745 if (!(page_ref_count(page) == 1)) in page_pool_empty_ring()
747 __func__, page_ref_count(page)); in page_pool_empty_ring()
749 page_pool_return_page(pool, page); in page_pool_empty_ring()
771 struct page *page; in page_pool_empty_alloc_cache_once() local
781 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
782 page_pool_return_page(pool, page); in page_pool_empty_alloc_cache_once()
864 struct page *page; in page_pool_update_nid() local
871 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
872 page_pool_return_page(pool, page); in page_pool_update_nid()
877 bool page_pool_return_skb_page(struct page *page) in page_pool_return_skb_page() argument
881 page = compound_head(page); in page_pool_return_skb_page()
883 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation in page_pool_return_skb_page()
885 * head page of compound page and bit 1 for pfmemalloc page, so in page_pool_return_skb_page()
888 * to avoid recycling the pfmemalloc page. in page_pool_return_skb_page()
890 if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE)) in page_pool_return_skb_page()
893 pp = page->pp; in page_pool_return_skb_page()
896 * This will *not* work for a NIC using a split-page memory model. in page_pool_return_skb_page()
897 * The page will be returned to the pool here regardless of the in page_pool_return_skb_page()
900 page_pool_put_full_page(pp, page, false); in page_pool_return_skb_page()
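
page_pool_return_skb_page() is the consumer side of skb recycling: when an skb is freed, pages whose pp_magic carries PP_SIGNATURE are routed back to their originating pool. Drivers opt in per skb with skb_mark_for_recycle(). A hedged sketch of that opt-in follows; the sizes and helper name are invented, and it assumes a kernel where skb_mark_for_recycle() takes only the skb:

#include <linux/skbuff.h>
#include <linux/mm.h>

static struct sk_buff *rx_build_skb(struct page *page, unsigned int headroom,
				    unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;

	skb = build_skb(page_address(page), truesize);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	/* On kfree_skb()/consume_skb(), pages carrying PP_SIGNATURE in
	 * pp_magic are matched by page_pool_return_skb_page() and recycled
	 * instead of hitting put_page().
	 */
	skb_mark_for_recycle(skb);
	return skb;		/* caller hands it to e.g. napi_gro_receive() */
}
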