Lines Matching +full:dma +full:- +full:safe +full:- +full:map

1 /* SPDX-License-Identifier: GPL-2.0
12 * uses one-frame-per-page, but has fallbacks that act like the
19 * API keeps track of in-flight pages, in order to let API users know
20 * when it is safe to deallocate the page_pool object. Thus, API users
27 * will release the DMA mapping and in-flight state accounting. We
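/*
 * Illustrative sketch (not part of this header): a driver teardown path
 * that honours the in-flight accounting described above.  The my_rxq
 * structure and my_rxq_pop_cached_page() helper are assumed, made-up
 * driver-side names.
 */
static void my_rxq_destroy(struct my_rxq *rxq)
{
	struct page *page;

	/* Return every page the driver still holds; the pool keeps
	 * counting in-flight pages until they all come back.
	 */
	while ((page = my_rxq_pop_cached_page(rxq)))
		page_pool_put_full_page(rxq->page_pool, page, false);

	/* Request destruction; the pool (and its DMA mappings) is only
	 * released once the in-flight count drops to zero.
	 */
	page_pool_destroy(rxq->page_pool);
}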
35 #include <linux/dma-direction.h>
37 #define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
38 * map/unmap
42 * DMA-synced-for-device according to
45 * Please note DMA-sync-for-CPU is still
57 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
61 * Keeping room for more objects is due to the XDP_DROP use-case. As
79 struct device *dev; /* device, for DMA pre-mapping purposes */
80 enum dma_data_direction dma_dir; /* DMA mapping direction */
81 unsigned int max_len; /* max DMA sync memory size */
82 unsigned int offset; /* DMA addr offset */
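/*
 * Illustrative sketch: filling in the parameters above and creating a
 * pool that performs the DMA mapping and device-side syncing on behalf
 * of the driver (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV).  The ring
 * size, headroom and pdev pointer are assumed driver specifics.
 */
struct page_pool_params pp_params = {
	.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
	.order		= 0,
	.pool_size	= 1024,				/* e.g. RX ring size */
	.nid		= NUMA_NO_NODE,
	.dev		= &pdev->dev,			/* device doing the DMA */
	.dma_dir	= DMA_FROM_DEVICE,
	.offset		= XDP_PACKET_HEADROOM,		/* where HW data starts */
	.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM, /* max area to sync */
};
struct page_pool *pool = page_pool_create(&pp_params);

if (IS_ERR(pool))
	return PTR_ERR(pool);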
106 * RX-queue. As the RX-queue is already protected by
120 * efficiently, in a way that doesn't bounce cache-lines.
128 /* A page_pool is strictly tied to a single RX-queue being
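/*
 * Illustrative sketch: because a pool is tied to one RX queue, drivers
 * typically create one pool per queue during setup.  num_rxqs, priv and
 * my_fill_pp_params() are assumed driver-side names.
 */
for (i = 0; i < priv->num_rxqs; i++) {
	my_fill_pp_params(priv, i, &pp_params);		/* assumed helper */
	priv->rxq[i].page_pool = page_pool_create(&pp_params);
	if (IS_ERR(priv->rxq[i].page_pool))
		return PTR_ERR(priv->rxq[i].page_pool);
}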
158 /* get the stored dma direction. A driver might decide to treat this locally and
164 return pool->p.dma_dir; in page_pool_get_dma_dir()
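/*
 * Illustrative sketch: syncing a just-received buffer for the CPU using
 * the stored direction (a driver may also cache the value at setup time,
 * as the comment above suggests).  dev, headroom and frame_len are
 * assumed driver-side values.
 */
dma_sync_single_for_cpu(dev,
			page_pool_get_dma_addr(page) + headroom,
			frame_len, page_pool_get_dma_dir(pool));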
200 /* Same as above but will try to sync the entire area pool->max_len */
204 /* When page_pool isn't compiled-in, net/core/xdp.c doesn't in page_pool_put_full_page()
208 page_pool_put_page(pool, page, -1, allow_direct); in page_pool_put_full_page()
212 /* Same as above but the caller must guarantee safe context, e.g. NAPI */
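/*
 * Illustrative sketch: recycling a dropped frame.  Inside the RX NAPI
 * poll loop the lockless per-pool cache can be used directly; anywhere a
 * safe context cannot be guaranteed, the generic put must be used.  rxq
 * is an assumed driver structure.
 */
/* XDP_DROP path, running in the pool's own NAPI context: */
page_pool_recycle_direct(rxq->page_pool, page);

/* elsewhere (no guaranteed context), fall back to the generic put: */
page_pool_put_full_page(rxq->page_pool, page, false);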
224 dma_addr_t ret = page->dma_addr; in page_pool_get_dma_addr()
227 ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; in page_pool_get_dma_addr()
234 page->dma_addr = addr; in page_pool_set_dma_addr()
236 page->dma_addr_upper = upper_32_bits(addr); in page_pool_set_dma_addr()
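/*
 * Illustrative sketch: with PP_FLAG_DMA_MAP the driver never maps pages
 * itself; it reads the stored mapping when posting an RX descriptor.
 * rx_desc and headroom are assumed driver specifics (the setter above is
 * used internally by page_pool, not by drivers).
 */
struct page *page = page_pool_dev_alloc_pages(pool);

if (unlikely(!page))
	return -ENOMEM;
rx_desc->buf_addr = cpu_to_le64(page_pool_get_dma_addr(page) + headroom);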
241 atomic_long_set(&page->pp_frag_count, nr); in page_pool_set_frag_count()
255 atomic_long_read(&page->pp_frag_count) == nr) in page_pool_atomic_sub_frag_count_return()
258 ret = atomic_long_sub_return(nr, &page->pp_frag_count); in page_pool_atomic_sub_frag_count_return()
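/*
 * Illustrative sketch: the frag-count helpers above back the sub-page
 * allocator (PP_FLAG_PAGE_FRAG), which carves several RX buffers out of
 * a single page; the page is only recycled once every fragment has been
 * returned.  The 2048-byte buffer size is an assumed driver choice.
 */
unsigned int offset;
struct page *page;

page = page_pool_alloc_frag(pool, &offset, 2048, GFP_ATOMIC);
if (unlikely(!page))
	return -ENOMEM;
/* hand (page, offset, 2048) to an RX descriptor or skb frag */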
274 return refcount_dec_and_test(&pool->user_cnt); in page_pool_put()
277 /* Caller must provide appropriate safe context, e.g. NAPI. */
281 if (unlikely(pool->p.nid != new_nid)) in page_pool_nid_changed()
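/*
 * Illustrative sketch: a NAPI poll routine checking whether the RX
 * interrupt migrated to another NUMA node, so future allocations come
 * from node-local memory.  my_rxq and my_rxq_process() are assumed
 * driver-side names.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_rxq *rxq = container_of(napi, struct my_rxq, napi);

	/* Safe here: we are in the pool's own NAPI/RX context */
	page_pool_nid_changed(rxq->page_pool, numa_mem_id());

	return my_rxq_process(rxq, budget);		/* assumed RX handler */
}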
286 __acquires(&pool->ring.producer_lock) in page_pool_ring_lock()
289 spin_lock(&pool->ring.producer_lock); in page_pool_ring_lock()
291 spin_lock_bh(&pool->ring.producer_lock); in page_pool_ring_lock()
295 __releases(&pool->ring.producer_lock) in page_pool_ring_unlock()
298 spin_unlock(&pool->ring.producer_lock); in page_pool_ring_unlock()
300 spin_unlock_bh(&pool->ring.producer_lock); in page_pool_ring_unlock()