Lines Matching +full:dma +full:- +full:pool
1 /* SPDX-License-Identifier: GPL-2.0
12 * uses one-frame-per-page, but has fallbacks that act like the
19 * The API keeps track of in-flight pages, in order to let the API user know
27 * will release the DMA mapping and in-flight state accounting. We
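As a rough illustration of the rule quoted above (one put or release per page), a minimal sketch of the driver-side lifecycle might look like the following; "struct my_rxq", "drop" and the skb plumbing are hypothetical, not part of the header:

/* Hedged sketch: allocate from the pool, then either recycle the page
 * or release it from the pool's accounting, exactly once.
 */
static int my_rx_one(struct my_rxq *rxq, bool drop)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rxq->page_pool);
	if (!page)
		return -ENOMEM;

	/* ... device DMAs a frame into the page ... */

	if (drop) {
		/* Return the page to the pool; call a put variant only once. */
		page_pool_put_full_page(rxq->page_pool, page, false);
	} else {
		/* Page leaves the pool (e.g. handed up in an skb without
		 * recycling support): drop the DMA mapping and the pool's
		 * in-flight accounting instead.
		 */
		page_pool_release_page(rxq->page_pool, page);
	}
	return 0;
}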
35 #include <linux/dma-direction.h>
37 #define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
42 * DMA-synced-for-device according to
45 * Please note DMA-sync-for-CPU is still
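A short sketch of what that note implies for a driver, assuming the pool was created with PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV: the pool maps pages and syncs them for the device on recycle, but the CPU-side sync before reading received data stays with the driver ("frame_len" is a hypothetical length):

/* Sketch only: sync the received area for the CPU before touching it. */
dma_addr_t dma = page_pool_get_dma_addr(page);

dma_sync_single_for_cpu(pool->p.dev, dma + pool->p.offset,
			frame_len, page_pool_get_dma_dir(pool));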
57 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
61 * Keeping room for more objects is due to the XDP_DROP use-case. As
79 struct device *dev; /* device, for DMA pre-mapping purposes */
80 enum dma_data_direction dma_dir; /* DMA mapping direction */
81 unsigned int max_len; /* max DMA sync memory size */
82 unsigned int offset; /* DMA addr offset */
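The flags and the fields listed above are what a driver fills into struct page_pool_params before calling page_pool_create(), typically once per RX queue. A hedged sketch; "MY_RX_RING_SIZE", "MY_RX_HEADROOM", "pdev" and "rxq" are hypothetical driver-side names:

/* Sketch of per-RX-queue pool creation. */
struct page_pool_params pp_params = {
	.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
	.order		= 0,
	.pool_size	= MY_RX_RING_SIZE,
	.nid		= NUMA_NO_NODE,
	.dev		= &pdev->dev,			/* device used for DMA mapping */
	.dma_dir	= DMA_FROM_DEVICE,
	.max_len	= PAGE_SIZE,			/* max area synced for the device */
	.offset		= MY_RX_HEADROOM,		/* headroom skipped when syncing */
};

rxq->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rxq->page_pool))
	return PTR_ERR(rxq->page_pool);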
90 u64 slow; /* slow-path order 0 allocations */
91 u64 slow_high_order; /* slow-path high order allocations */
103 u64 ring_full; /* page was released from page-pool because
113 * stats for the page pool.
125 * Drivers that wish to harvest page pool stats and report them to users
127 * struct page_pool_stats and call page_pool_get_stats() to get stats for the specified pool.
129 bool page_pool_get_stats(struct page_pool *pool,
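One way a driver might harvest these counters, sketched under the assumption of a hypothetical per-queue layout ("priv", "num_rxqs", "rxqs[]"): page_pool_get_stats() accumulates into the passed struct, so it can be called once per pool, and the result can be converted for ethtool with the page_pool_ethtool_stats_get() helper.

#ifdef CONFIG_PAGE_POOL_STATS
/* Sketch: sum page_pool stats over all RX queues for ethtool reporting. */
static u64 *my_get_pp_stats(struct my_priv *priv, u64 *data)
{
	struct page_pool_stats stats = {};
	int i;

	for (i = 0; i < priv->num_rxqs; i++)
		page_pool_get_stats(priv->rxqs[i].page_pool, &stats);

	/* Emit the accumulated counters in the u64 array ethtool expects. */
	return page_pool_ethtool_stats_get(data, &stats);
}
#endif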
177 * RX-queue. As the RX-queue is already protected by
191 * efficiently, in a way that doesn't bounce cache-lines.
198 /* recycle stats are per-cpu to avoid locking */
203 /* A page_pool is strictly tied to a single RX-queue being
212 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
214 static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) in page_pool_dev_alloc_pages() argument
218 return page_pool_alloc_pages(pool, gfp); in page_pool_dev_alloc_pages()
221 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
224 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, in page_pool_dev_alloc_frag() argument
230 return page_pool_alloc_frag(pool, offset, size, gfp); in page_pool_dev_alloc_frag()
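For pools created with PP_FLAG_PAGE_FRAG, the frag API above carves sub-page buffers out of pooled pages. A hedged sketch; "MY_RX_BUF_SIZE" and "rxq" are hypothetical:

/* Sketch: allocate one sub-page buffer from a PP_FLAG_PAGE_FRAG pool. */
unsigned int offset;
struct page *page;

page = page_pool_dev_alloc_frag(rxq->page_pool, &offset, MY_RX_BUF_SIZE);
if (!page)
	return -ENOMEM;

/* The buffer starts at page_address(page) + offset on the CPU side,
 * and at page_pool_get_dma_addr(page) + offset for the device.
 */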
233 /* get the stored dma direction. A driver might decide to treat this locally and
237 inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) in page_pool_get_dma_dir() argument
239 return pool->p.dma_dir; in page_pool_get_dma_dir()
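As the comment suggests, a driver may read the direction once at setup and keep it in its own per-queue state rather than dereferencing the pool in the fast path; a one-line sketch with a hypothetical "rxq" field:

/* Sketch: cache the DMA direction locally at queue setup time. */
rxq->dma_dir = page_pool_get_dma_dir(rxq->page_pool);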
249 void page_pool_destroy(struct page_pool *pool);
250 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
252 void page_pool_release_page(struct page_pool *pool, struct page *page);
253 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
256 static inline void page_pool_destroy(struct page_pool *pool) in page_pool_destroy() argument
260 static inline void page_pool_use_xdp_mem(struct page_pool *pool, in page_pool_use_xdp_mem() argument
265 static inline void page_pool_release_page(struct page_pool *pool, in page_pool_release_page() argument
270 static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data, in page_pool_put_page_bulk() argument
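page_pool_put_page_bulk() returns a whole batch of pages in one call (it is used on the XDP bulk-return path). A sketch of the calling convention only; "MY_BULK_MAX", "rxq" and the collection loop are hypothetical:

/* Sketch: return a batch of pages to the pool in one call; the API
 * takes an array of void * page pointers and a count.
 */
void *data[MY_BULK_MAX];
int n = 0;

/* ... collect up to MY_BULK_MAX pages into data[] ... */
data[n++] = page;

page_pool_put_page_bulk(rxq->page_pool, data, n);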
276 void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
282 atomic_long_set(&page->pp_frag_count, nr); in page_pool_fragment_page()
298 if (atomic_long_read(&page->pp_frag_count) == nr) in page_pool_defrag_page()
301 ret = atomic_long_sub_return(nr, &page->pp_frag_count); in page_pool_defrag_page()
306 static inline bool page_pool_is_last_frag(struct page_pool *pool, in page_pool_is_last_frag() argument
310 return !(pool->p.flags & PP_FLAG_PAGE_FRAG) || in page_pool_is_last_frag()
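These helpers maintain the per-page frag reference count. A driver that splits one pooled page into several buffers itself might seed the count and drop references explicitly; a hedged sketch, where "refs" is a hypothetical number of buffers carved from the page:

/* Sketch: hand out "refs" references to one pooled page, then drop
 * them one by one; the caller that drops the last reference returns
 * the page to the pool.
 */
page_pool_fragment_page(page, refs);

/* ... later, for each buffer that is done: ... */
if (page_pool_defrag_page(page, 1) == 0)
	page_pool_put_defragged_page(rxq->page_pool, page, -1, false);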
314 static inline void page_pool_put_page(struct page_pool *pool, in page_pool_put_page() argument
319 /* When page_pool isn't compiled-in, net/core/xdp.c doesn't in page_pool_put_page()
323 if (!page_pool_is_last_frag(pool, page)) in page_pool_put_page()
326 page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct); in page_pool_put_page()
330 /* Same as above but will try to sync the entire area pool->max_len */
331 static inline void page_pool_put_full_page(struct page_pool *pool, in page_pool_put_full_page() argument
334 page_pool_put_page(pool, page, -1, allow_direct); in page_pool_put_full_page()
338 static inline void page_pool_recycle_direct(struct page_pool *pool, in page_pool_recycle_direct() argument
341 page_pool_put_full_page(pool, page, true); in page_pool_recycle_direct()
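page_pool_recycle_direct() is the cheapest return path and is only safe from the pool's own NAPI/softirq context; the typical caller is the XDP_DROP case inside the RX poll loop. A sketch with hypothetical "act" and "rxq":

/* Sketch: XDP_DROP fast path inside the RX NAPI poll, where recycling
 * directly into the lockless cache is allowed.
 */
switch (act) {
case XDP_DROP:
	page_pool_recycle_direct(rxq->page_pool, page);
	break;
/* ... other verdicts ... */
}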
349 dma_addr_t ret = page->dma_addr; in page_pool_get_dma_addr()
352 ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; in page_pool_get_dma_addr()
359 page->dma_addr = addr; in page_pool_set_dma_addr()
361 page->dma_addr_upper = upper_32_bits(addr); in page_pool_set_dma_addr()
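The stored DMA address is what a driver posts to the hardware when refilling its RX ring, since the pool recorded the mapping at allocation time (with PP_FLAG_DMA_MAP). A sketch with a hypothetical descriptor layout ("rx_desc", "headroom"):

/* Sketch: post a pooled page's DMA address to an RX descriptor. */
dma_addr_t dma = page_pool_get_dma_addr(page);

rx_desc->addr = cpu_to_le64(dma + rxq->headroom);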
373 static inline bool page_pool_put(struct page_pool *pool) in page_pool_put() argument
375 return refcount_dec_and_test(&pool->user_cnt); in page_pool_put()
379 void page_pool_update_nid(struct page_pool *pool, int new_nid);
380 static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) in page_pool_nid_changed() argument
382 if (unlikely(pool->p.nid != new_nid)) in page_pool_nid_changed()
383 page_pool_update_nid(pool, new_nid); in page_pool_nid_changed()
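page_pool_nid_changed() is cheap enough to call from the NAPI poll itself, so allocations follow the node the queue is currently running on. A sketch; "struct my_rxq" and its napi member are hypothetical:

/* Sketch: keep pool allocations local to the CPU/node running the poll. */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_rxq *rxq = container_of(napi, struct my_rxq, napi);

	page_pool_nid_changed(rxq->page_pool, numa_mem_id());

	/* ... normal RX processing ... */
	return 0;
}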
386 static inline void page_pool_ring_lock(struct page_pool *pool) in page_pool_ring_lock() argument
387 __acquires(&pool->ring.producer_lock) in page_pool_ring_lock()
390 spin_lock(&pool->ring.producer_lock); in page_pool_ring_lock()
392 spin_lock_bh(&pool->ring.producer_lock); in page_pool_ring_lock()
395 static inline void page_pool_ring_unlock(struct page_pool *pool) in page_pool_ring_unlock() argument
396 __releases(&pool->ring.producer_lock) in page_pool_ring_unlock()
399 spin_unlock(&pool->ring.producer_lock); in page_pool_ring_unlock()
401 spin_unlock_bh(&pool->ring.producer_lock); in page_pool_ring_unlock()