Lines matching refs: page
262 static void page_pool_return_page(struct page_pool *pool, struct page *page);
265 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache()
268 struct page *page; in page_pool_refill_alloc_cache() local
289 page = __ptr_ring_consume(r); in page_pool_refill_alloc_cache()
290 if (unlikely(!page)) in page_pool_refill_alloc_cache()
293 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
294 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
301 page_pool_return_page(pool, page); in page_pool_refill_alloc_cache()
303 page = NULL; in page_pool_refill_alloc_cache()
310 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
314 return page; in page_pool_refill_alloc_cache()
318 static struct page *__page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached()
320 struct page *page; in __page_pool_get_cached() local
325 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
328 page = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
331 return page; in __page_pool_get_cached()
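
The lines above (262-331) cover the allocation fast path: __page_pool_get_cached() first pops from the per-softirq alloc.cache array, and only on a miss does page_pool_refill_alloc_cache() bulk-consume recycled pages from the ptr_ring, returning any page from the wrong NUMA node via page_pool_return_page(). A minimal userspace model of that two-tier lookup, with hypothetical names (tiny_pool, tiny_pool_get) standing in for the kernel structures:

	/* Userspace sketch of the array-cache-over-ring pattern above;
	 * not kernel code, names are illustrative. */
	#include <stddef.h>

	#define CACHE_SIZE 128

	struct tiny_pool {
		void *cache[CACHE_SIZE];	/* models pool->alloc.cache */
		size_t count;			/* models pool->alloc.count */
		void *(*ring_consume)(void *ctx); /* models __ptr_ring_consume() */
		void *ring_ctx;
	};

	/* Models __page_pool_get_cached(): array first, refill on miss. */
	static void *tiny_pool_get(struct tiny_pool *p)
	{
		if (p->count)
			return p->cache[--p->count];	/* lockless fast path */

		/* Slow path, like page_pool_refill_alloc_cache(): pull a
		 * batch from the shared ring into the local array. */
		while (p->count < CACHE_SIZE) {
			void *obj = p->ring_consume(p->ring_ctx);

			if (!obj)
				break;
			p->cache[p->count++] = obj;
		}
		return p->count ? p->cache[--p->count] : NULL;
	}
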
335 struct page *page, in page_pool_dma_sync_for_device() argument
338 dma_addr_t dma_addr = page_pool_get_dma_addr(page); in page_pool_dma_sync_for_device()
346 static bool page_pool_dma_map(struct page_pool *pool, struct page *page) in page_pool_dma_map() argument
355 dma = dma_map_page_attrs(pool->p.dev, page, 0, in page_pool_dma_map()
362 page_pool_set_dma_addr(page, dma); in page_pool_dma_map()
365 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); in page_pool_dma_map()
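
page_pool_dma_map() (lines 346-365) only runs when the pool owns DMA mapping; it stores the dma_addr_t in the page and, for pools that also sync for the device, pre-syncs pool->p.max_len bytes. A hedged driver-side sketch of creating such a pool; field values are illustrative and assume the <net/page_pool.h> of this kernel era:

	#include <linux/dma-mapping.h>
	#include <net/page_pool.h>

	static struct page_pool *my_rx_pool_create(struct device *dev)
	{
		struct page_pool_params pp = {
			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order		= 0,		/* order-0 pages */
			.pool_size	= 1024,		/* ptr_ring slots */
			.nid		= NUMA_NO_NODE,
			.dev		= dev,
			.dma_dir	= DMA_FROM_DEVICE,
			.max_len	= PAGE_SIZE,	/* sync length on recycle */
			.offset		= 0,
		};

		return page_pool_create(&pp);	/* ERR_PTR() on failure */
	}

With these flags the mapping happens once per page when it enters the pool, so the hot path only ever reads page_pool_get_dma_addr().
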
371 struct page *page) in page_pool_set_pp_info() argument
373 page->pp = pool; in page_pool_set_pp_info()
374 page->pp_magic |= PP_SIGNATURE; in page_pool_set_pp_info()
376 pool->p.init_callback(page, pool->p.init_arg); in page_pool_set_pp_info()
379 static void page_pool_clear_pp_info(struct page *page) in page_pool_clear_pp_info() argument
381 page->pp_magic = 0; in page_pool_clear_pp_info()
382 page->pp = NULL; in page_pool_clear_pp_info()
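
page_pool_set_pp_info() (lines 371-376) stamps page->pp and PP_SIGNATURE so the skb recycling path can recognize pool-owned pages, and fires the optional init_callback exactly once per page as it enters the pool; page_pool_clear_pp_info() undoes the stamp on release. A hedged sketch of a driver using that callback (my_rx_page_init and the 64-byte headroom are hypothetical):

	#include <linux/mm.h>
	#include <linux/string.h>

	/* Called from page_pool_set_pp_info() for every page that enters
	 * the pool, before the driver ever sees it. */
	static void my_rx_page_init(struct page *page, void *arg)
	{
		/* Illustrative one-time work, e.g. clearing device headroom. */
		memset(page_address(page), 0, 64);
	}

	/* Wired up through the creation params:
	 *	pp.init_callback = my_rx_page_init;
	 *	pp.init_arg      = my_priv;
	 */
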
385 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, in __page_pool_alloc_page_order()
388 struct page *page; in __page_pool_alloc_page_order() local
391 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
392 if (unlikely(!page)) in __page_pool_alloc_page_order()
396 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_page_order()
397 put_page(page); in __page_pool_alloc_page_order()
402 page_pool_set_pp_info(pool, page); in __page_pool_alloc_page_order()
406 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); in __page_pool_alloc_page_order()
407 return page; in __page_pool_alloc_page_order()
412 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, in __page_pool_alloc_pages_slow()
418 struct page *page; in __page_pool_alloc_pages_slow() local
441 page = pool->alloc.cache[i]; in __page_pool_alloc_pages_slow()
443 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_pages_slow()
444 put_page(page); in __page_pool_alloc_pages_slow()
448 page_pool_set_pp_info(pool, page); in __page_pool_alloc_pages_slow()
449 pool->alloc.cache[pool->alloc.count++] = page; in __page_pool_alloc_pages_slow()
452 trace_page_pool_state_hold(pool, page, in __page_pool_alloc_pages_slow()
458 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
461 page = NULL; in __page_pool_alloc_pages_slow()
465 return page; in __page_pool_alloc_pages_slow()
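
Lines 385-465 are the two slow paths: __page_pool_alloc_page_order() takes the single-page route for high-order pools, while __page_pool_alloc_pages_slow() bulk-allocates order-0 pages straight into alloc.cache and then DMA-maps each one, dropping (put_page(), line 444) any page that fails to map. A hedged sketch of the bulk-fill pattern, assuming the alloc_pages_bulk_array_node() helper of kernels from this era:

	#include <linux/gfp.h>

	/* Fill up to 'want' slots of cache[] with order-0 pages from node
	 * 'nid'; returns how many slots were actually filled. The real
	 * slow path then maps each page and evicts failures. */
	static unsigned long bulk_fill(gfp_t gfp, int nid,
				       struct page **cache, unsigned long want)
	{
		return alloc_pages_bulk_array_node(gfp, nid, want, cache);
	}
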
471 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages()
473 struct page *page; in page_pool_alloc_pages() local
476 page = __page_pool_get_cached(pool); in page_pool_alloc_pages()
477 if (page) in page_pool_alloc_pages()
478 return page; in page_pool_alloc_pages()
481 page = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_pages()
482 return page; in page_pool_alloc_pages()
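
page_pool_alloc_pages() (lines 471-482) is the exported entry point: cached fast path first, slow path only on a miss. A hedged driver-side sketch; page_pool_dev_alloc_pages() is the GFP_ATOMIC convenience wrapper around it:

	#include <net/page_pool.h>

	static int my_rx_refill_one(struct page_pool *pool, dma_addr_t *slot)
	{
		struct page *page = page_pool_dev_alloc_pages(pool);

		if (unlikely(!page))
			return -ENOMEM;

		/* Valid because the pool was created with PP_FLAG_DMA_MAP. */
		*slot = page_pool_get_dma_addr(page);
		return 0;
	}
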
510 static void page_pool_return_page(struct page_pool *pool, struct page *page) in page_pool_return_page() argument
521 dma = page_pool_get_dma_addr(page); in page_pool_return_page()
527 page_pool_set_dma_addr(page, 0); in page_pool_return_page()
529 page_pool_clear_pp_info(page); in page_pool_return_page()
535 trace_page_pool_state_release(pool, page, count); in page_pool_return_page()
537 put_page(page); in page_pool_return_page()
544 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) in page_pool_recycle_in_ring() argument
549 ret = ptr_ring_produce(&pool->ring, page); in page_pool_recycle_in_ring()
551 ret = ptr_ring_produce_bh(&pool->ring, page); in page_pool_recycle_in_ring()
566 static bool page_pool_recycle_in_cache(struct page *page, in page_pool_recycle_in_cache() argument
575 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
586 static __always_inline struct page *
587 __page_pool_put_page(struct page_pool *pool, struct page *page, in __page_pool_put_page() argument
601 if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) { in __page_pool_put_page()
605 page_pool_dma_sync_for_device(pool, page, in __page_pool_put_page()
609 page_pool_recycle_in_cache(page, pool)) in __page_pool_put_page()
613 return page; in __page_pool_put_page()
629 page_pool_return_page(pool, page); in __page_pool_put_page()
634 void page_pool_put_defragged_page(struct page_pool *pool, struct page *page, in page_pool_put_defragged_page() argument
637 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); in page_pool_put_defragged_page()
638 if (page && !page_pool_recycle_in_ring(pool, page)) { in page_pool_put_defragged_page()
641 page_pool_return_page(pool, page); in page_pool_put_defragged_page()
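
Lines 510-641 are the release side. __page_pool_put_page() is the recycling gate: a page is recyclable only when the pool holds the sole reference (page_ref_count() == 1) and the page is not pfmemalloc; it then tries the lockless alloc.cache (allow_direct, softirq context only), hands the page back to page_pool_put_defragged_page() for the ptr_ring, and falls through to page_pool_return_page() when recycling is impossible. A hedged sketch of the usual driver call, which reaches this path through the page_pool_put_full_page() helper of this era:

	/* 'napi' may only be true when called from the pool's own NAPI
	 * poll context, since allow_direct recycles into the lockless
	 * per-softirq cache. */
	static void my_rx_recycle(struct page_pool *pool, struct page *page,
				  bool napi)
	{
		page_pool_put_full_page(pool, page, napi);
	}
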
668 struct page *page = virt_to_head_page(data[i]); in page_pool_put_page_bulk() local
671 if (!page_pool_is_last_frag(pool, page)) in page_pool_put_page_bulk()
674 page = __page_pool_put_page(pool, page, -1, false); in page_pool_put_page_bulk()
676 if (page) in page_pool_put_page_bulk()
677 data[bulk_len++] = page; in page_pool_put_page_bulk()
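
page_pool_put_page_bulk() (lines 668-677) amortizes the ptr_ring lock: it maps each buffer back to its head page via virt_to_head_page(), defrags and filters, then produces the survivors into the ring in one locked section. Its typical caller is the XDP frame-return path; a hedged sketch of that batching pattern, assuming the xdp_frame_bulk helpers from <net/xdp.h>:

	#include <net/xdp.h>

	static void my_tx_clean(struct xdp_frame **frames, int n)
	{
		struct xdp_frame_bulk bq;
		int i;

		xdp_frame_bulk_init(&bq);

		rcu_read_lock();	/* bulk helpers look up the mem model under RCU */
		for (i = 0; i < n; i++)
			xdp_return_frame_bulk(frames[i], &bq);
		xdp_flush_frame_bulk(&bq);
		rcu_read_unlock();
	}
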
707 static struct page *page_pool_drain_frag(struct page_pool *pool, in page_pool_drain_frag() argument
708 struct page *page) in page_pool_drain_frag()
713 if (likely(page_pool_defrag_page(page, drain_count))) in page_pool_drain_frag()
716 if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) { in page_pool_drain_frag()
718 page_pool_dma_sync_for_device(pool, page, -1); in page_pool_drain_frag()
720 return page; in page_pool_drain_frag()
723 page_pool_return_page(pool, page); in page_pool_drain_frag()
730 struct page *page = pool->frag_page; in page_pool_free_frag() local
734 if (!page || page_pool_defrag_page(page, drain_count)) in page_pool_free_frag()
737 page_pool_return_page(pool, page); in page_pool_free_frag()
740 struct page *page_pool_alloc_frag(struct page_pool *pool, in page_pool_alloc_frag()
745 struct page *page = pool->frag_page; in page_pool_alloc_frag() local
754 if (page && *offset + size > max_size) { in page_pool_alloc_frag()
755 page = page_pool_drain_frag(pool, page); in page_pool_alloc_frag()
756 if (page) { in page_pool_alloc_frag()
762 if (!page) { in page_pool_alloc_frag()
763 page = page_pool_alloc_pages(pool, gfp); in page_pool_alloc_frag()
764 if (unlikely(!page)) { in page_pool_alloc_frag()
769 pool->frag_page = page; in page_pool_alloc_frag()
775 page_pool_fragment_page(page, BIAS_MAX); in page_pool_alloc_frag()
776 return page; in page_pool_alloc_frag()
782 return page; in page_pool_alloc_frag()
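
page_pool_alloc_frag() (lines 740-782) carves sub-page buffers out of pool->frag_page, seeding each fresh page with an oversized reference bias via page_pool_fragment_page(page, BIAS_MAX) that page_pool_defrag_page() later pays down; page_pool_drain_frag() and page_pool_free_frag() settle the leftover bias when the page is retired. A hedged driver-side sketch (the pool must be created with PP_FLAG_PAGE_FRAG in this era; the 2048-byte size is illustrative):

	#include <linux/mm.h>
	#include <net/page_pool.h>

	static void *my_rx_frag(struct page_pool *pool, unsigned int *offset)
	{
		struct page *page;

		/* GFP_ATOMIC wrapper around page_pool_alloc_frag() above. */
		page = page_pool_dev_alloc_frag(pool, offset, 2048);
		if (unlikely(!page))
			return NULL;

		return page_address(page) + *offset;
	}
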
788 struct page *page; in page_pool_empty_ring() local
791 while ((page = ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
793 if (!(page_ref_count(page) == 1)) in page_pool_empty_ring()
795 __func__, page_ref_count(page)); in page_pool_empty_ring()
797 page_pool_return_page(pool, page); in page_pool_empty_ring()
819 struct page *page; in page_pool_empty_alloc_cache_once() local
829 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
830 page_pool_return_page(pool, page); in page_pool_empty_alloc_cache_once()
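
page_pool_empty_ring() and page_pool_empty_alloc_cache_once() (lines 788-830) run on the destroy path, returning every cached page and complaining loudly if a ring page's refcount is not 1. A hedged teardown sketch: the driver returns the pages it still holds, then calls page_pool_destroy(), which drains the caches as above and defers final freeing until any in-flight pages come back:

	static void my_rx_fini(struct page_pool *pool,
			       struct page **bufs, int n)
	{
		int i;

		/* Return every driver-held page before destroy. */
		for (i = 0; i < n; i++)
			if (bufs[i])
				page_pool_put_full_page(pool, bufs[i], false);

		page_pool_destroy(pool);
	}
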
928 struct page *page; in page_pool_update_nid() local
935 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
936 page_pool_return_page(pool, page); in page_pool_update_nid()
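
page_pool_update_nid() (lines 928-936) re-homes the pool after the RX interrupt migrates: it updates pool->p.nid and flushes alloc.cache, returning each cached page so refills come from the new node. A hedged sketch of the usual trigger, using the page_pool_nid_changed() inline helper that only calls page_pool_update_nid() on an actual change:

	#include <linux/topology.h>
	#include <net/page_pool.h>

	/* Typically called from the driver's NAPI poll when it may be
	 * running on a different NUMA node than the pool was created on. */
	static void my_rx_check_node(struct page_pool *pool)
	{
		page_pool_nid_changed(pool, numa_mem_id());
	}
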