Home
last modified time | relevance | path

Searched refs: nb_pkts (Results 1 – 5 of 5) sorted by relevance

/Linux-v6.1/tools/testing/selftests/bpf/
xskxceiver.c:576 if (pkt_nb >= pkt_stream->nb_pkts) in pkt_stream_get_pkt()
584 while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) { in pkt_stream_get_next_rx_pkt()
615 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) in __pkt_stream_alloc() argument
623 pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); in __pkt_stream_alloc()
629 pkt_stream->nb_pkts = nb_pkts; in __pkt_stream_alloc()
643 static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) in pkt_stream_generate() argument
648 pkt_stream = __pkt_stream_alloc(nb_pkts); in pkt_stream_generate()
652 pkt_stream->nb_pkts = nb_pkts; in pkt_stream_generate()
653 for (i = 0; i < nb_pkts; i++) { in pkt_stream_generate()
665 return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len); in pkt_stream_clone()
[all …]
xskxceiver.h:124 u32 nb_pkts; member
/Linux-v6.1/drivers/net/ethernet/intel/i40e/
i40e_xsk.c:507 static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts, in i40e_fill_tx_hw_ring() argument
512 batched = nb_pkts & ~(PKTS_PER_BATCH - 1); in i40e_fill_tx_hw_ring()
513 leftover = nb_pkts & (PKTS_PER_BATCH - 1); in i40e_fill_tx_hw_ring()
539 u32 nb_pkts, nb_processed = 0; in i40e_xmit_zc() local
542 nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget); in i40e_xmit_zc()
543 if (!nb_pkts) in i40e_xmit_zc()
546 if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { in i40e_xmit_zc()
552 i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed, in i40e_xmit_zc()
559 i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes); in i40e_xmit_zc()
561 return nb_pkts < budget; in i40e_xmit_zc()
/Linux-v6.1/net/xdp/
xsk.c:349 u32 nb_pkts = 0; in xsk_tx_peek_release_fallback() local
351 while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts])) in xsk_tx_peek_release_fallback()
352 nb_pkts++; in xsk_tx_peek_release_fallback()
355 return nb_pkts; in xsk_tx_peek_release_fallback()
358 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts) in xsk_tx_peek_release_desc_batch() argument
366 return xsk_tx_peek_release_fallback(pool, nb_pkts); in xsk_tx_peek_release_desc_batch()
371 nb_pkts = 0; in xsk_tx_peek_release_desc_batch()
375 nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts); in xsk_tx_peek_release_desc_batch()
383 nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts); in xsk_tx_peek_release_desc_batch()
384 if (!nb_pkts) in xsk_tx_peek_release_desc_batch()
[all …]
/Linux-v6.1/drivers/net/ethernet/intel/ice/
ice_xsk.c:897 u32 nb_pkts, unsigned int *total_bytes) in ice_fill_tx_hw_ring() argument
901 batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH); in ice_fill_tx_hw_ring()
902 leftover = nb_pkts & (PKTS_PER_BATCH - 1); in ice_fill_tx_hw_ring()
932 u32 nb_pkts, nb_processed = 0; in ice_xmit_zc() local
941 nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget); in ice_xmit_zc()
942 if (!nb_pkts) in ice_xmit_zc()
945 if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { in ice_xmit_zc()
951 ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed, in ice_xmit_zc()
956 ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes); in ice_xmit_zc()
961 return nb_pkts < budget; in ice_xmit_zc()