Lines matching refs: q (net/xdp/xsk_queue.h, AF_XDP queue helpers)

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries >= cnt)
		return true;

	/* Refresh the local pointer. */
	q->prod_tail = READ_ONCE(q->ring->producer);
	entries = q->prod_tail - q->cons_tail;

	return entries >= cnt;
}

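/*
 * prod_tail/cons_tail above are free-running u32 counters, cached copies
 * of the shared ring's producer/consumer fields; they are only masked
 * with q->ring_mask when the ring is actually indexed, so the
 * subtractions stay correct across u32 wraparound. A minimal sketch of
 * the same arithmetic (example_nb_used is ours, not a kernel helper):
 */
static inline u32 example_nb_used(u32 producer, u32 consumer)
{
	/* e.g. producer = 3, consumer = 0xfffffffeU -> 5 entries in use */
	return producer - consumer;
}
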
/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr,
						u64 length,
						struct xdp_umem *umem)
{
	u64 base_addr = xsk_umem_extract_addr(addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (base_addr >= q->size || addr >= q->size ||
	    xskq_crosses_non_contig_pg(umem, addr, length)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

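/*
 * In unaligned-chunk mode (XDP_UMEM_UNALIGNED_CHUNK_FLAG) a 64-bit ring
 * address carries an offset in its upper bits on top of the chunk base
 * address; xsk_umem_extract_addr() and xsk_umem_add_offset_to_addr()
 * unpack the two parts so that both the base and the offset-adjusted
 * address can be range-checked against the UMEM size.
 */
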
static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr,
				      struct xdp_umem *umem)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;

		if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
			if (xskq_is_valid_addr_unaligned(q, *addr,
							 umem->chunk_size_nohr,
							 umem))
				return addr;
			goto out;
		}

		if (xskq_is_valid_addr(q, *addr))
			return addr;

out:
		q->cons_tail++;
	}

	return NULL;
}

static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr,
				  struct xdp_umem *umem)
{
	if (q->cons_tail == q->cons_head) {
		smp_mb(); /* D, matches A */
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_addr(q, addr, umem);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
	q->cons_tail++;
}

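/*
 * A minimal sketch of the peek/discard consume pattern, modeled on the
 * callers in net/xdp/xsk.c (example_consume_addr is ours, not a kernel
 * function): xskq_peek_addr() refreshes cons_head and publishes the
 * consumer counter when the cached window is empty; xskq_discard_addr()
 * then commits the peeked entry locally.
 */
static inline int example_consume_addr(struct xsk_queue *fq,
				       struct xdp_umem *umem)
{
	u64 addr;

	if (!xskq_peek_addr(fq, &addr, umem))
		return -ENOSPC;		/* fill queue is empty */

	/* ... use the UMEM chunk at addr ... */

	xskq_discard_addr(fq);		/* consume the peeked entry */
	return 0;
}
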
static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_tail, 1) == 0)
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->prod_tail++ & q->ring_mask] = addr;

	/* Order producer and data */
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

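/*
 * The "A, matches D" and "B, matches C" notes refer to the barrier
 * pairing described at the top of xsk_queue.h: the data store (A) only
 * happens after free space has been observed through ->consumer, which
 * the consumer publishes behind its smp_mb() (D); the smp_wmb() (B)
 * ordering data before the ->producer update pairs with the consumer's
 * smp_rmb() (C) ordering the ->producer read before the data read.
 */
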
static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->prod_head++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
					     u32 nb_entries)
{
	/* Order producer and data */
	smp_wmb(); /* B, matches C */

	q->prod_tail += nb_entries;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

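/*
 * Sketch of the lazy-produce/flush pairing (example_complete_n is ours;
 * the shape follows xsk_umem_complete_tx() in net/xdp/xsk.c): entries
 * are staged at prod_head without touching the shared producer counter,
 * and one flush then publishes them all behind a single smp_wmb().
 */
static inline void example_complete_n(struct xsk_queue *cq,
				      const u64 *addrs, u32 n)
{
	u32 i;

	for (i = 0; i < n; i++)
		if (xskq_produce_addr_lazy(cq, addrs[i]))
			break;		/* ring full; publish what fit */

	xskq_produce_flush_addr_n(cq, i);
}
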
static inline int xskq_reserve_addr(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	/* A, matches D */
	q->prod_head++;
	return 0;
}

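/*
 * The reserve/produce split lets a caller guarantee space up front: the
 * Tx path in net/xdp/xsk.c reserves a completion-queue slot per packet
 * at prod_head before transmitting, and the skb destructor later fills
 * it in with xskq_produce_addr().
 */
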
/* Rx/Tx queue */

static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d,
				      struct xdp_umem *umem)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
		if (!xskq_is_valid_addr_unaligned(q, d->addr, d->len, umem))
			return false;

		if (d->len > umem->chunk_size_nohr || d->options) {
			q->invalid_descs++;
			return false;
		}

		return true;
	}

	if (!xskq_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
	    d->options) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

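/*
 * Worked example of the aligned-mode boundary check above: with 2 KiB
 * chunks, q->chunk_mask clears the low 11 bits. For addr = 2000 and
 * len = 100, the frame ends at 2100; (2100 & chunk_mask) selects chunk
 * 2048 while (2000 & chunk_mask) selects chunk 0, so the frame would
 * straddle a chunk boundary and the descriptor is counted invalid.
 */
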
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc,
						  struct xdp_umem *umem)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*desc = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_desc(q, desc, umem))
			return desc;

		q->cons_tail++;
	}

	return NULL;
}

static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc,
					      struct xdp_umem *umem)
{
	if (q->cons_tail == q->cons_head) {
		smp_mb(); /* D, matches A */
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb(); /* C, matches B */
	}

	return xskq_validate_desc(q, desc, umem);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
}

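/*
 * Sketch of the Tx-side consume pattern (example_tx_one is ours, shaped
 * like xsk_generic_xmit() in net/xdp/xsk.c): peek a descriptor, hand
 * the frame off for transmission, then discard the ring entry.
 */
static inline int example_tx_one(struct xsk_queue *tx,
				 struct xdp_umem *umem)
{
	struct xdp_desc desc;

	if (!xskq_peek_desc(tx, &desc, umem))
		return -EAGAIN;		/* Tx ring is empty */

	/* ... transmit the frame at desc.addr with length desc.len ... */

	xskq_discard_desc(tx);
	return 0;
}
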
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	/* A, matches D */
	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb(); /* B, matches C */

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

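/*
 * Sketch of the produce/flush pairing on the Rx ring (example_rx_one is
 * ours; the Rx path in net/xdp/xsk.c has this shape): a descriptor is
 * staged at prod_head, then the flush moves prod_tail forward and
 * publishes the shared producer counter in one step.
 */
static inline int example_rx_one(struct xsk_queue *rx, u64 addr, u32 len)
{
	int err;

	err = xskq_produce_batch_desc(rx, addr, len);
	if (err)
		return err;		/* -ENOSPC: Rx ring is full */

	xskq_produce_flush_desc(rx);	/* publish to user space */
	return 0;
}
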
static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);