net/xdp/xsk_queue.h: AF_XDP ring consumer/producer helpers (lines matching refs:q)

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx];
		return true;
	}

	return false;
}
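/*
 * For reference, the shape of the two ring layouts behind the casts of
 * q->ring. Both are defined earlier in xsk_queue.h (outside this
 * excerpt) and are shown here simplified: the real struct xdp_ring
 * also carries cache-line padding between the fields to hinder false
 * sharing, and exact members vary by kernel version.
 */
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 flags;
};

/* Rx and Tx rings carry full descriptors (addr + len)... */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* ...while fill and completion rings carry bare 64-bit addresses. */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};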
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		/* Skip the invalid descriptor and try the next one. */
		q->cached_cons++;
	}

	return false;
}
static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
					    struct xsk_buff_pool *pool, u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];
		if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
			/* Skip the entry */
			cached_cons++;
			continue;
		}

		nb_entries++;
		cached_cons++;
	}

	return nb_entries;
}
/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
}
static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}
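/*
 * The A/B/C/D labels refer to the pairing scheme documented in the
 * comment at the top of xsk_queue.h (not shown in this excerpt): the
 * producer's data store is published by a release-store of ->producer
 * (B), observed by the consumer's acquire-load (C); the consumer's
 * release-store of ->consumer (D) tells the producer a slot may be
 * reused (A). Below is a minimal user-space analogue of the same
 * scheme using C11 atomics; spsc_ring, ring_produce and ring_consume
 * are a hypothetical sketch, not part of the kernel API. Note that the
 * kernel gets away with READ_ONCE() plus a control dependency at (A),
 * but portable C11 needs an acquire load there.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 64u	/* power of two, mirrors q->nentries */

struct spsc_ring {
	_Atomic uint32_t producer;	/* written only by the producer */
	_Atomic uint32_t consumer;	/* written only by the consumer */
	uint64_t desc[RING_SIZE];
};

static bool ring_produce(struct spsc_ring *r, uint64_t val)
{
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
	uint32_t cons = atomic_load_explicit(&r->consumer,
					     memory_order_acquire);	/* A, matches D */

	if (prod - cons == RING_SIZE)
		return false;				/* ring full */

	r->desc[prod & (RING_SIZE - 1)] = val;		/* STORE $data */
	atomic_store_explicit(&r->producer, prod + 1,
			      memory_order_release);	/* B, matches C */
	return true;
}

static bool ring_consume(struct spsc_ring *r, uint64_t *val)
{
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
	uint32_t prod = atomic_load_explicit(&r->producer,
					     memory_order_acquire);	/* C, matches B */

	if (cons == prod)
		return false;				/* ring empty */

	*val = r->desc[cons & (RING_SIZE - 1)];		/* LOAD $data */
	atomic_store_explicit(&r->consumer, cons + 1,
			      memory_order_release);	/* D, matches A */
	return true;
}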
static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	return xskq_cons_nb_entries(q, cnt) >= cnt;
}
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
					    struct xsk_buff_pool *pool, u32 max)
{
	u32 entries = xskq_cons_nb_entries(q, max);

	return xskq_cons_read_desc_batch(q, descs, pool, entries);
}
/* To improve performance, only update local state here and reflect it to
 * global state when new entries are fetched from the ring in
 * __xskq_cons_peek() and whenever Rx or Tx processing completes in the
 * NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}
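/*
 * Sketch of typical consumer usage, modeled loosely on the callers in
 * net/xdp/xsk.c; example_tx_drain is a hypothetical helper, not a
 * kernel function. Descriptors are peeked and processed one by one,
 * each release only bumps the local cached_cons, and the shared
 * consumer pointer is pushed out once at the end.
 */
static void example_tx_drain(struct xsk_queue *tx, struct xsk_buff_pool *pool)
{
	struct xdp_desc desc;

	while (xskq_cons_peek_desc(tx, &desc, pool)) {
		/* ... hand desc.addr / desc.len to the driver ... */
		xskq_cons_release(tx);	/* local cached_cons only */
	}

	__xskq_cons_release(tx);	/* now visible to the user-space producer */
}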
static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}
/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return !xskq_prod_nb_free(q, 1);
}
static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}
static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}
static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					       u32 max)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 nb_entries, i, cached_prod;

	nb_entries = xskq_prod_nb_free(q, max);

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;

	return nb_entries;
}
static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}
static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}
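/*
 * Sketch of typical producer usage; example_rx_publish is hypothetical
 * (the real callers live in net/xdp/xsk.c). A slot is reserved and
 * filled first, then published with a single release-store via submit,
 * so the consumer never observes a producer pointer ahead of the data.
 */
static int example_rx_publish(struct xsk_queue *rx, u64 addr, u32 len)
{
	int err;

	err = xskq_prod_reserve_desc(rx, addr, len);
	if (err)
		return err;		/* -ENOSPC: ring full, drop or retry */

	xskq_prod_submit(rx);		/* B: smp_store_release(->producer) */
	return 0;
}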
static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}
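/*
 * These counters surface to user space through the XDP_STATISTICS
 * getsockopt on an AF_XDP socket. A user-space sketch follows;
 * example_print_stats is a hypothetical helper with error handling
 * trimmed, and the exact set of struct xdp_statistics fields varies
 * with kernel version.
 */
#include <linux/if_xdp.h>
#include <sys/socket.h>
#include <stdio.h>

static void example_print_stats(int xsk_fd)
{
	struct xdp_statistics stats;
	socklen_t optlen = sizeof(stats);

	if (getsockopt(xsk_fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
		return;

	printf("rx_invalid_descs: %llu tx_invalid_descs: %llu\n",
	       (unsigned long long)stats.rx_invalid_descs,
	       (unsigned long long)stats.tx_invalid_descs);
}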