/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

/* The shared state of each ring is a single-producer/single-consumer
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion rings, the kernel is the producer and user space is the
 * consumer; for the Tx and fill rings, the roles are reversed.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {          (A) LOAD.acq ->producer        (C)
 *    STORE $data                  LOAD $data
 *    STORE.rel ->producer         (B) STORE.rel ->consumer       (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C); the (B)/(C) pair
 * provides the needed ordering between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data: in case ->consumer indicates there is no
 * room in the ring, the stores are not executed, so no explicit
 * barrier is needed between them.
 */

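/* Illustration (not part of this header): user space must honor the
 * same (C)/(D) pairing from its side of the ring. Below is a minimal
 * standalone C11 consumer sketch; struct ring_view is hypothetical,
 * with pointers assumed to come from mmap()ing the ring and
 * mask == nentries - 1.
 */
#include <stdatomic.h>
#include <stdint.h>

struct ring_view {
	_Atomic uint32_t *producer;	/* shared ring header fields */
	_Atomic uint32_t *consumer;
	uint64_t *descs;		/* e.g. fill/completion ring payload */
	uint32_t mask;
};

static int ring_cons_one(struct ring_view *r, uint64_t *addr)
{
	uint32_t cons = atomic_load_explicit(r->consumer, memory_order_relaxed);
	uint32_t prod = atomic_load_explicit(r->producer, memory_order_acquire); /* C */

	if (cons == prod)
		return 0;			/* ring empty */

	*addr = r->descs[cons & r->mask];	/* LOAD $data */
	atomic_store_explicit(r->consumer, cons + 1, memory_order_release); /* D */
	return 1;
}
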
112 /* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	if (likely(desc->len)) {
		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
		if (chunk != chunk_end)
			return false;
	}

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 base_addr = xp_unaligned_extract_addr(desc->addr);
	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

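/* Worked example (standalone illustration, not kernel code): in
 * aligned mode a packet must not cross a chunk boundary. Assuming a
 * power-of-two chunk size of 2048 bytes, addr = 6000/len = 100 stays
 * inside the chunk at 4096..6143 and passes, while addr = 6000/
 * len = 200 ends at byte 6199 inside the next chunk and is rejected.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CHUNK_SIZE 2048ULL	/* assumed power-of-two chunk size */

static bool aligned_desc_ok(uint64_t addr, uint32_t len)
{
	uint64_t chunk = addr & ~(CHUNK_SIZE - 1);
	uint64_t chunk_end = (addr + len - 1) & ~(CHUNK_SIZE - 1);

	return !len || chunk == chunk_end;
}

int main(void)
{
	assert(aligned_desc_ok(6000, 100));	/* fits in one chunk */
	assert(!aligned_desc_ok(6000, 200));	/* crosses into 6144.. */
	return 0;
}
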
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
					    u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];
		if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
			/* Skip the entry */
			cached_cons++;
			continue;
		}

		nb_entries++;
		cached_cons++;
	}

	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return nb_entries;
}

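/* Usage sketch (not kernel code): zero-copy drivers reach this path
 * through the exported helpers in include/net/xdp_sock_drv.h such as
 * xsk_tx_peek_desc() and xsk_tx_release(). struct my_ring and the
 * my_hw_*() helpers below are hypothetical placeholders for
 * NIC-specific work.
 */
struct my_ring;				/* hypothetical NIC Tx ring */
extern void my_hw_tx_slot(struct my_ring *txr, dma_addr_t dma, u32 len);
extern void my_hw_kick(struct my_ring *txr);

static void my_xsk_xmit_sketch(struct my_ring *txr, struct xsk_buff_pool *pool,
			       u32 budget)
{
	struct xdp_desc desc;
	u32 sent = 0;

	while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

		my_hw_tx_slot(txr, dma, desc.len);	/* post to HW ring */
		sent++;
	}

	if (sent) {
		my_hw_kick(txr);	/* ring the doorbell */
		xsk_tx_release(pool);	/* publishes ->consumer, (D) */
	}
}
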
239 /* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer);  /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

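/* Sketch of the consumer calling pattern used inside net/xdp/xsk.c
 * (simplified from its Tx path, not a verbatim copy): peek validates
 * and hands out one descriptor, xskq_cons_release() advances only the
 * cached index, and __xskq_cons_release() later publishes the batch
 * to user space in a single store. process_one() is a hypothetical
 * placeholder.
 */
extern bool process_one(struct xdp_sock *xs, struct xdp_desc *desc);

static void tx_drain_sketch(struct xdp_sock *xs)
{
	struct xdp_desc desc;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (!process_one(xs, &desc))	/* placeholder */
			break;
		xskq_cons_release(xs->tx);	/* local state only */
	}

	__xskq_cons_release(xs->tx);		/* D: publish ->consumer */
}
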
307 /* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

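/* The prod/cons indices are free-running u32 counters; only array
 * accesses apply ring_mask. Unsigned wraparound keeps the free-count
 * arithmetic above correct even across overflow, as this standalone
 * illustration (not kernel code) checks:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t nentries = 4096;
	uint32_t cons = UINT32_MAX - 5;		/* about to wrap */
	uint32_t prod = cons + 100;		/* already wrapped past 0 */

	assert(prod - cons == 100);		  /* entries outstanding */
	assert(nentries - (prod - cons) == 3996); /* free slots */
	return 0;
}
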
static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

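/* Producer-side calling pattern (simplified sketch of the Rx path in
 * net/xdp/xsk.c, not a verbatim copy): each frame is reserved into
 * xs->rx as it arrives, and ->producer is published once per NAPI
 * batch at flush time. Error paths and the fill-queue release are
 * elided.
 */
static int rx_one_sketch(struct xdp_sock *xs, u64 addr, u32 len)
{
	int err = xskq_prod_reserve_desc(xs->rx, addr, len);

	if (err)
		xs->rx_queue_full++;
	return err;
}

static void rx_flush_sketch(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);	/* B: one release store per batch */
}
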
/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}