Lines Matching +full:max +full:- +full:functions
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* XDP user-space ring structure
52 * Documentation/core-api/circular-buffers.rst. For the Rx and
59 * if (LOAD ->consumer) {           (A) LOAD.acq ->producer  (C)
61 *    STORE.rel ->producer  (B)     STORE.rel ->consumer  (D)
76 * between ->producer and data.
78 * (A) is a control dependency that separates the load of ->consumer
79 * from the stores of $data. In case ->consumer indicates there is no
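The comment fragment above describes the ordering contract between the two sides of the ring: the producer's STORE.rel of ->producer (B) pairs with the consumer's LOAD.acq (C), and the consumer's STORE.rel of ->consumer (D) pairs with the producer's check of ->consumer (A). As a rough userspace analogue of that pairing (not the kernel's smp_load_acquire()/smp_store_release(), and with invented names throughout), the same single-producer/single-consumer scheme can be sketched with C11 atomics; note that plain C11 gives no control-dependency guarantee, so the load at (A) is an acquire here.

/* Toy userspace analogue of the (A)/(B)/(C)/(D) pairing above, using
 * C11 atomics. Single producer, single consumer, power-of-two size.
 * All names are invented for this illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 8u                     /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct toy_ring {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
	uint64_t desc[RING_SIZE];
};

static bool toy_produce(struct toy_ring *r, uint64_t val)
{
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
	/* (A): the kernel relies on a control dependency here; portable C11
	 * needs an acquire load to pair with the consumer's release in (D).
	 */
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_acquire);

	if (prod - cons == RING_SIZE)
		return false;                    /* full: do not store $data */

	r->desc[prod & RING_MASK] = val;         /* STORE $data */
	/* (B): the data must be visible before the new producer index. */
	atomic_store_explicit(&r->producer, prod + 1, memory_order_release);
	return true;
}

static bool toy_consume(struct toy_ring *r, uint64_t *val)
{
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
	/* (C): pairs with (B), so the descriptor written above is visible. */
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_acquire);

	if (cons == prod)
		return false;                    /* empty */

	*val = r->desc[cons & RING_MASK];        /* LOAD $data */
	/* (D): pairs with (A); the slot may only be reused after this. */
	atomic_store_explicit(&r->consumer, cons + 1, memory_order_release);
	return true;
}

int main(void)
{
	struct toy_ring r = { 0 };
	uint64_t v = 0;

	toy_produce(&r, 42);
	return toy_consume(&r, &v) && v == 42 ? 0 : 1;
}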
112 /* Functions that read and validate content from consumer rings. */
116 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_cons_read_addr_unchecked()
118 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_addr_unchecked()
119 u32 idx = q->cached_cons & q->ring_mask; in xskq_cons_read_addr_unchecked()
121 *addr = ring->desc[idx]; in xskq_cons_read_addr_unchecked()
133 chunk = xp_aligned_extract_addr(pool, desc->addr); in xp_aligned_validate_desc()
134 if (likely(desc->len)) { in xp_aligned_validate_desc()
135 chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1); in xp_aligned_validate_desc()
140 if (chunk >= pool->addrs_cnt) in xp_aligned_validate_desc()
143 if (desc->options) in xp_aligned_validate_desc()
153 base_addr = xp_unaligned_extract_addr(desc->addr); in xp_unaligned_validate_desc()
154 addr = xp_unaligned_add_offset_to_addr(desc->addr); in xp_unaligned_validate_desc()
156 if (desc->len > pool->chunk_size) in xp_unaligned_validate_desc()
159 if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt || in xp_unaligned_validate_desc()
160 xp_desc_crosses_non_contig_pg(pool, addr, desc->len)) in xp_unaligned_validate_desc()
163 if (desc->options) in xp_unaligned_validate_desc()
171 return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) : in xp_validate_desc()
180 q->invalid_descs++; in xskq_cons_is_valid_desc()
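For reference, xp_unaligned_extract_addr() and xp_unaligned_add_offset_to_addr() operate on the unaligned-chunk address layout, where the lower 48 bits of a descriptor address hold the base address within the UMEM and the upper 16 bits hold an offset into the chunk (XSK_UNALIGNED_BUF_OFFSET_SHIFT and XSK_UNALIGNED_BUF_ADDR_MASK in include/uapi/linux/if_xdp.h). A minimal userspace sketch of that split, with made-up helper names:

/* Illustrative helpers mirroring the unaligned-chunk address layout:
 * base address in the low 48 bits, offset in the high 16 bits. The
 * constants follow include/uapi/linux/if_xdp.h; the function names
 * here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

static uint64_t example_encode_addr(uint64_t base, uint64_t offset)
{
	return (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT) |
	       (base & XSK_UNALIGNED_BUF_ADDR_MASK);
}

/* Roughly what xp_unaligned_extract_addr() returns. */
static uint64_t example_extract_base(uint64_t addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

/* Roughly what xp_unaligned_add_offset_to_addr() returns. */
static uint64_t example_effective_addr(uint64_t addr)
{
	return example_extract_base(addr) +
	       (addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

int main(void)
{
	uint64_t addr = example_encode_addr(0x10000, 14); /* base + 14-byte offset */

	printf("base=%#llx effective=%#llx\n",
	       (unsigned long long)example_extract_base(addr),
	       (unsigned long long)example_effective_addr(addr));
	return 0;
}

xp_unaligned_validate_desc() above then checks both the base and the offset-adjusted address against pool->addrs_cnt and rejects descriptors that would cross a non-contiguous page boundary.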
190 while (q->cached_cons != q->cached_prod) { in xskq_cons_read_desc()
191 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_cons_read_desc()
192 u32 idx = q->cached_cons & q->ring_mask; in xskq_cons_read_desc()
194 *desc = ring->desc[idx]; in xskq_cons_read_desc()
198 q->cached_cons++; in xskq_cons_read_desc()
206 struct xsk_buff_pool *pool, u32 max) in xskq_cons_read_desc_batch() argument
208 u32 cached_cons = q->cached_cons, nb_entries = 0; in xskq_cons_read_desc_batch()
210 while (cached_cons != q->cached_prod && nb_entries < max) { in xskq_cons_read_desc_batch()
211 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_cons_read_desc_batch()
212 u32 idx = cached_cons & q->ring_mask; in xskq_cons_read_desc_batch()
214 descs[nb_entries] = ring->desc[idx]; in xskq_cons_read_desc_batch()
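Both xskq_cons_read_desc() and xskq_cons_read_desc_batch() index the ring through free-running 32-bit counters (cached_cons/cached_prod) masked with ring_mask, so the producer/consumer difference stays correct even when the counters wrap. A stand-alone illustration of that indexing scheme, with invented names and a deliberately tiny ring:

/* Toy demonstration of free-running index arithmetic on a power-of-two
 * ring, as used by cached_cons/cached_prod and ring_mask above.
 */
#include <assert.h>
#include <stdint.h>

#define NENTRIES  4u
#define RING_MASK (NENTRIES - 1)

int main(void)
{
	uint64_t slots[NENTRIES];
	uint32_t prod = UINT32_MAX - 1;   /* about to wrap around */
	uint32_t cons = prod;

	/* Produce three entries straddling the u32 wrap-around. */
	for (uint64_t v = 0; v < 3; v++)
		slots[prod++ & RING_MASK] = v;

	/* prod - cons still reports the right fill level after the wrap. */
	assert(prod - cons == 3);

	/* Consume them back in order. */
	for (uint64_t v = 0; v < 3; v++)
		assert(slots[cons++ & RING_MASK] == v);

	assert(prod - cons == 0);
	return 0;
}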
228 /* Functions for consumers */
232 smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */ in __xskq_cons_release()
238 q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */ in __xskq_cons_peek()
247 static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max) in xskq_cons_nb_entries() argument
249 u32 entries = q->cached_prod - q->cached_cons; in xskq_cons_nb_entries()
251 if (entries >= max) in xskq_cons_nb_entries()
252 return max; in xskq_cons_nb_entries()
255 entries = q->cached_prod - q->cached_cons; in xskq_cons_nb_entries()
257 return entries >= max ? max : entries; in xskq_cons_nb_entries()
267 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_addr_unchecked()
276 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_desc()
282 struct xsk_buff_pool *pool, u32 max) in xskq_cons_peek_desc_batch() argument
284 u32 entries = xskq_cons_nb_entries(q, max); in xskq_cons_peek_desc_batch()
289 /* To improve performance in the xskq_cons_release functions, only update local state here.
295 q->cached_cons++; in xskq_cons_release()
300 q->cached_cons += cnt; in xskq_cons_release_n()
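As the comment before xskq_cons_release() notes, releasing only advances the private cached_cons; the shared ->consumer index is published later by __xskq_cons_release() (line 232, the store-release marked D). A self-contained toy model of that "local state now, global state later" split, with invented names:

/* Toy model of deferred consumer publication: advance a private
 * cached_cons per entry, publish it to the shared index once per batch.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct toy_cons {
	_Atomic uint32_t *shared_consumer; /* visible to the producer */
	uint32_t cached_cons;              /* private to this consumer */
};

static void toy_release_one(struct toy_cons *c)
{
	c->cached_cons++;                  /* cheap: no shared-memory store */
}

static void toy_release_publish(struct toy_cons *c)
{
	/* One release store covers the whole batch (the (D) barrier above). */
	atomic_store_explicit(c->shared_consumer, c->cached_cons,
			      memory_order_release);
}

int main(void)
{
	_Atomic uint32_t shared = 0;
	struct toy_cons c = { .shared_consumer = &shared, .cached_cons = 0 };

	for (int i = 0; i < 8; i++)
		toy_release_one(&c);       /* consume 8 entries locally */

	toy_release_publish(&c);           /* single store frees all 8 slots */
	printf("consumer index now %u\n", (unsigned)atomic_load(&shared));
	return 0;
}

Batching the publication keeps the per-entry path free of stores to the shared ring state, which is the performance point the comment makes.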
306 return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) == in xskq_cons_is_full()
307 q->nentries; in xskq_cons_is_full()
313 return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer); in xskq_cons_present_entries()
316 /* Functions for producers */
318 static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max) in xskq_prod_nb_free() argument
320 u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
322 if (free_entries >= max) in xskq_prod_nb_free()
323 return max; in xskq_prod_nb_free()
326 q->cached_cons = READ_ONCE(q->ring->consumer); in xskq_prod_nb_free()
327 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
329 return free_entries >= max ? max : free_entries; in xskq_prod_nb_free()
339 q->cached_prod--; in xskq_prod_cancel()
345 return -ENOSPC; in xskq_prod_reserve()
348 q->cached_prod++; in xskq_prod_reserve()
354 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_prod_reserve_addr()
357 return -ENOSPC; in xskq_prod_reserve_addr()
360 ring->desc[q->cached_prod++ & q->ring_mask] = addr; in xskq_prod_reserve_addr()
365 u32 max) in xskq_prod_reserve_addr_batch() argument
367 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_prod_reserve_addr_batch()
370 nb_entries = xskq_prod_nb_free(q, max); in xskq_prod_reserve_addr_batch()
373 cached_prod = q->cached_prod; in xskq_prod_reserve_addr_batch()
375 ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr; in xskq_prod_reserve_addr_batch()
376 q->cached_prod = cached_prod; in xskq_prod_reserve_addr_batch()
384 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_prod_reserve_desc()
388 return -ENOSPC; in xskq_prod_reserve_desc()
391 idx = q->cached_prod++ & q->ring_mask; in xskq_prod_reserve_desc()
392 ring->desc[idx].addr = addr; in xskq_prod_reserve_desc()
393 ring->desc[idx].len = len; in xskq_prod_reserve_desc()
400 smp_store_release(&q->ring->producer, idx); /* B, matches C */ in __xskq_prod_submit()
405 __xskq_prod_submit(q, q->cached_prod); in xskq_prod_submit()
410 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_prod_submit_addr()
411 u32 idx = q->ring->producer; in xskq_prod_submit_addr()
413 ring->desc[idx++ & q->ring_mask] = addr; in xskq_prod_submit_addr()
420 __xskq_prod_submit(q, q->ring->producer + nb_entries); in xskq_prod_submit_n()
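The producer side mirrors that split: xskq_prod_reserve(), xskq_prod_reserve_addr() and xskq_prod_reserve_desc() only write descriptors and bump the private cached_prod, while xskq_prod_submit() and xskq_prod_submit_n() publish the result through __xskq_prod_submit()'s store-release (B). A toy reserve-then-submit producer along the same lines (invented types, not the kernel API):

/* Toy reserve/submit producer: descriptors are written at cached_prod
 * first, then a single release store of the producer index makes them
 * visible to the consumer.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <errno.h>

#define NENTRIES  64u
#define RING_MASK (NENTRIES - 1)

struct toy_prod {
	_Atomic uint32_t producer;   /* shared with the consumer */
	_Atomic uint32_t consumer;   /* written by the consumer */
	uint32_t cached_prod;        /* private: reserved, not yet submitted */
	uint32_t cached_cons;        /* private: last seen consumer index */
	uint64_t desc[NENTRIES];
};

static int toy_prod_reserve(struct toy_prod *q, uint64_t addr)
{
	if (q->cached_prod - q->cached_cons == NENTRIES) {
		/* Slow path: refresh the cached consumer index. */
		q->cached_cons = atomic_load_explicit(&q->consumer,
						      memory_order_acquire);
		if (q->cached_prod - q->cached_cons == NENTRIES)
			return -ENOSPC;              /* really full */
	}

	q->desc[q->cached_prod++ & RING_MASK] = addr;    /* not yet visible */
	return 0;
}

static void toy_prod_submit(struct toy_prod *q)
{
	/* (B): all reserved descriptors become visible in one release store. */
	atomic_store_explicit(&q->producer, q->cached_prod,
			      memory_order_release);
}

int main(void)
{
	static struct toy_prod q;

	if (toy_prod_reserve(&q, 0x1000) == 0)   /* write one descriptor... */
		toy_prod_submit(&q);             /* ...then publish it */
	return 0;
}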
426 return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer); in xskq_prod_is_empty()
433 return q ? q->invalid_descs : 0; in xskq_nb_invalid_descs()
438 return q ? q->queue_empty_descs : 0; in xskq_nb_queue_empty_descs()