
Searched refs:xsk_pool (Results 1 – 25 of 32) sorted by relevance

/Linux-v6.1/drivers/net/ethernet/intel/i40e/
i40e_xsk.c 214 if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) in i40e_run_xdp_zc()
256 nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); in i40e_alloc_rx_buffers_zc()
433 xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
455 if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { in i40e_clean_rx_irq_zc()
457 xsk_set_rx_need_wakeup(rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
459 xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
472 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); in i40e_xmit_pkt()
473 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); in i40e_xmit_pkt()
492 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr); in i40e_xmit_pkt_batch()
493 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len); in i40e_xmit_pkt_batch()
[all …]
i40e_txrx.h 395 struct xsk_buff_pool *xsk_pool; member
i40e_txrx.c 793 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in i40e_clean_tx_ring()
1480 if (rx_ring->xsk_pool) { in i40e_clean_rx_ring()
1514 if (rx_ring->xsk_pool) in i40e_clean_rx_ring()
2706 bool wd = ring->xsk_pool ? in i40e_napi_poll()
2734 int cleaned = ring->xsk_pool ? in i40e_napi_poll()
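
The i40e hits above cluster around the zero-copy RX clean path: syncing each pool buffer for the CPU before the XDP program runs, then flipping the RX need_wakeup flag for user space. Below is a minimal sketch of that shape, assuming a kernel driver context with <net/xdp_sock_drv.h>; my_rx_ring and its fields are hypothetical stand-ins, not i40e code, and descriptor-done checks plus the XDP program call are omitted.

    #include <net/xdp_sock_drv.h>

    /* Hypothetical ring state; a real driver keeps this in its own structs. */
    struct my_rx_ring {
        struct xsk_buff_pool *xsk_pool;   /* set when an AF_XDP socket is bound */
        struct xdp_buff **rx_bufs;        /* one pool buffer per descriptor slot */
        u16 next_to_clean;
        u16 count;
        bool alloc_failed;                /* refill ran out of fill-queue entries */
    };

    static int my_clean_rx_irq_zc(struct my_rx_ring *rx, int budget)
    {
        int done = 0;

        while (done < budget) {
            struct xdp_buff *xdp = rx->rx_bufs[rx->next_to_clean];

            /* Make the frame the NIC wrote visible to the CPU before
             * the XDP program reads it. */
            xsk_buff_dma_sync_for_cpu(xdp, rx->xsk_pool);

            /* ... run the XDP program, then pass up or redirect ... */

            rx->next_to_clean = (rx->next_to_clean + 1) % rx->count;
            done++;
        }

        /* need_wakeup mode: tell user space whether it must kick the
         * fill ring (e.g. via poll()) because the driver ran dry. */
        if (xsk_uses_need_wakeup(rx->xsk_pool)) {
            if (rx->alloc_failed)
                xsk_set_rx_need_wakeup(rx->xsk_pool);
            else
                xsk_clear_rx_need_wakeup(rx->xsk_pool);
        }

        return done;
    }
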
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c 21 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) in mlx5e_xsk_alloc_rx_mpwqe()
25 batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units, in mlx5e_xsk_alloc_rx_mpwqe()
35 wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool); in mlx5e_xsk_alloc_rx_mpwqe()
86 rq->xsk_pool->chunk_size); in mlx5e_xsk_alloc_rx_mpwqe()
87 __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size); in mlx5e_xsk_alloc_rx_mpwqe()
158 alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk); in mlx5e_xsk_alloc_rx_wqes_batched()
160 alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig); in mlx5e_xsk_alloc_rx_wqes_batched()
162 alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig); in mlx5e_xsk_alloc_rx_wqes_batched()
197 frag->au->xsk = xsk_buff_alloc(rq->xsk_pool); in mlx5e_xsk_alloc_rx_wqes()
253 xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
[all …]
tx.c 63 struct xsk_buff_pool *pool = sq->xsk_pool; in mlx5e_xsk_tx()
setup.c 66 rq->xsk_pool = pool; in mlx5e_init_xsk_rq()
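
The mlx5 en/xsk hits above show how the RX fill path draws buffers from the pool: a cheap xsk_buff_can_alloc() check, a bulk xsk_buff_alloc_batch(), and a per-buffer xsk_buff_alloc() fallback when the batch comes back short. A minimal sketch of that allocation pattern, assuming kernel driver context; my_refill_rx_zc and its destination array are hypothetical names, not mlx5 code.

    #include <net/xdp_sock_drv.h>

    static u32 my_refill_rx_zc(struct xsk_buff_pool *pool,
                               struct xdp_buff **bufs, u32 wanted)
    {
        u32 got;

        /* Bail out early if the fill queue cannot cover a full batch,
         * so the caller can retry on the next NAPI pass. */
        if (unlikely(!xsk_buff_can_alloc(pool, wanted)))
            return 0;

        /* Grab as many buffers as possible in one call ... */
        got = xsk_buff_alloc_batch(pool, bufs, wanted);

        /* ... and top up one by one if the batch came back short. */
        while (got < wanted) {
            struct xdp_buff *xdp = xsk_buff_alloc(pool);

            if (!xdp)
                break;
            bufs[got++] = xdp;
        }

        return got;
    }
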
/Linux-v6.1/drivers/net/ethernet/netronome/nfp/nfd3/
xsk.c 21 struct xsk_buff_pool *pool = r_vec->xsk_pool; in nfp_nfd3_xsk_tx_xdp()
187 xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool); in nfp_nfd3_xsk_rx()
324 xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused); in nfp_nfd3_xsk_complete()
337 struct xsk_buff_pool *xsk_pool; in nfp_nfd3_xsk_tx() local
342 xsk_pool = r_vec->xsk_pool; in nfp_nfd3_xsk_tx()
346 if (!xsk_tx_peek_desc(xsk_pool, &desc[i])) in nfp_nfd3_xsk_tx()
356 xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr, in nfp_nfd3_xsk_tx()
368 xsk_buff_raw_get_dma(xsk_pool, desc[i].addr)); in nfp_nfd3_xsk_tx()
381 xsk_tx_release(xsk_pool); in nfp_nfd3_xsk_tx()
rings.c 25 if (tx_ring->r_vec->xsk_pool) { in nfp_nfd3_xsk_tx_bufs_free()
29 xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1); in nfp_nfd3_xsk_tx_bufs_free()
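
The nfp nfd3 hits above cover the zero-copy TX side: peeking descriptors off the AF_XDP TX ring, translating umem addresses to DMA, syncing for the device, releasing the ring, and later reporting completions. A sketch of that flow, assuming kernel driver context; my_hw_tx_one is a hypothetical placeholder for posting one hardware TX descriptor, not an nfp function.

    #include <net/xdp_sock_drv.h>

    /* Hypothetical: program one hardware TX descriptor for (dma, len). */
    static void my_hw_tx_one(dma_addr_t dma, u32 len)
    {
    }

    static u32 my_xsk_tx(struct xsk_buff_pool *pool, u32 budget)
    {
        struct xdp_desc desc;
        u32 sent = 0;

        while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
            /* Translate the umem offset to a DMA address and flush the
             * frame toward the device before kicking the hardware. */
            dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

            xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
            my_hw_tx_one(dma, desc.len);
            sent++;
        }

        if (sent)
            /* Release the consumed TX ring entries so user space sees progress. */
            xsk_tx_release(pool);

        return sent;
    }

    /* From the TX completion path, once the NIC is done with the frames:
     *     xsk_tx_completed(pool, done_frames);
     * which is what the xsk_tx_completed() hits above correspond to. */
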
/Linux-v6.1/drivers/net/ethernet/intel/ice/
ice_xsk.c 490 nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, in __ice_alloc_rx_bufs_zc()
504 nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count); in __ice_alloc_rx_bufs_zc()
611 if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) in ice_run_xdp_zc()
701 xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool); in ice_clean_rx_irq_zc()
755 if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { in ice_clean_rx_irq_zc()
757 xsk_set_rx_need_wakeup(rx_ring->xsk_pool); in ice_clean_rx_irq_zc()
759 xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); in ice_clean_rx_irq_zc()
833 xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); in ice_clean_xdp_irq_zc()
848 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); in ice_xmit_pkt()
849 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); in ice_xmit_pkt()
[all …]
ice_base.c 440 if (ring->xsk_pool) in ice_setup_rx_ctx()
509 ring->xsk_pool = ice_xsk_pool(ring); in ice_vsi_cfg_rxq()
510 if (ring->xsk_pool) { in ice_vsi_cfg_rxq()
514 xsk_pool_get_rx_frame_size(ring->xsk_pool); in ice_vsi_cfg_rxq()
520 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in ice_vsi_cfg_rxq()
546 if (ring->xsk_pool) { in ice_vsi_cfg_rxq()
549 if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) { in ice_vsi_cfg_rxq()
ice_txrx.h 295 struct xsk_buff_pool *xsk_pool; member
318 struct xsk_buff_pool *xsk_pool; member
ice_txrx.c 154 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
398 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
428 if (rx_ring->xsk_pool) in ice_clean_rx_ring()
458 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
1469 if (tx_ring->xsk_pool) in ice_napi_poll()
1502 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
ice.h 724 ring->xsk_pool = NULL; in ice_tx_xsk_pool()
728 ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid); in ice_tx_xsk_pool()
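
The ice hits above show the configuration side: looking up the pool bound to a queue id, sizing RX buffers from the pool, wiring the pool to the queue's xdp_rxq_info, and sanity-checking that the fill queue can actually populate the ring. A sketch under those assumptions; my_ring and my_cfg_rxq_zc are hypothetical, not ice code.

    #include <linux/netdevice.h>
    #include <net/xdp_sock_drv.h>

    /* Hypothetical per-queue state; xdp_rxq is assumed to be registered
     * elsewhere with xdp_rxq_info_reg(). */
    struct my_ring {
        struct net_device *netdev;
        struct xdp_rxq_info xdp_rxq;
        struct xsk_buff_pool *xsk_pool;
        u16 q_index;
        u16 count;
        u32 rx_buf_len;
    };

    static void my_cfg_rxq_zc(struct my_ring *ring)
    {
        /* Non-NULL only if an AF_XDP socket is bound to this queue id
         * in zero-copy mode. */
        ring->xsk_pool = xsk_get_pool_from_qid(ring->netdev, ring->q_index);
        if (!ring->xsk_pool)
            return;    /* regular page / skb path */

        /* Size hardware buffers to the pool's per-frame payload area and
         * let the pool know which xdp_rxq_info the frames belong to. */
        ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
        xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

        /* Warn early if the fill queue cannot populate the whole ring. */
        if (!xsk_buff_can_alloc(ring->xsk_pool, ring->count))
            netdev_warn(ring->netdev,
                        "XSK fill queue too small for %u RX descriptors\n",
                        ring->count);
    }
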
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/
en_txrx.c 90 bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool); in mlx5e_napi_xsk_post()
98 xsk_set_tx_need_wakeup(xsksq->xsk_pool); in mlx5e_napi_xsk_post()
102 xsk_clear_tx_need_wakeup(xsksq->xsk_pool); in mlx5e_napi_xsk_post()
108 xsk_set_rx_need_wakeup(xskrq->xsk_pool); in mlx5e_napi_xsk_post()
117 xsk_set_rx_need_wakeup(xskrq->xsk_pool); in mlx5e_napi_xsk_post()
119 xsk_clear_rx_need_wakeup(xskrq->xsk_pool); in mlx5e_napi_xsk_post()
en_main.c 453 u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0; in mlx5e_create_rq_umr_mkey()
496 if (rq->xsk_pool) { in mlx5e_init_frags_partition()
771 xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq); in mlx5e_alloc_rq()
1286 struct xsk_buff_pool *xsk_pool, in mlx5e_alloc_xdpsq() argument
1302 sq->xsk_pool = xsk_pool; in mlx5e_alloc_xdpsq()
1304 sq->stats = sq->xsk_pool ? in mlx5e_alloc_xdpsq()
1791 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, in mlx5e_open_xdpsq() argument
1797 err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect); in mlx5e_open_xdpsq()
2397 struct xsk_buff_pool *xsk_pool, in mlx5e_open_channel() argument
2439 if (xsk_pool) { in mlx5e_open_channel()
[all …]
en.h 543 struct xsk_buff_pool *xsk_pool; member
754 struct xsk_buff_pool *xsk_pool; member
1050 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
en_rx.c 384 if (rq->xsk_pool) { in mlx5e_free_rx_wqe()
463 if (rq->xsk_pool) { in mlx5e_free_rx_mpwqe()
782 if (!rq->xsk_pool) in mlx5e_post_rx_wqes()
784 else if (likely(!rq->xsk_pool->dma_need_sync)) in mlx5e_post_rx_wqes()
973 alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) : in mlx5e_post_rx_mpwqes()
996 if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool)) in mlx5e_post_rx_mpwqes()
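
The en_txrx.c hits above are the TX counterpart of the RX need_wakeup handling: at the end of the NAPI poll, the driver decides whether user space must kick TX with sendto() to get further packets out. A minimal sketch of that decision; my_xsk_sq and the more_napi_work flag are hypothetical simplifications of the mlx5e logic.

    #include <net/xdp_sock_drv.h>

    struct my_xsk_sq {
        struct xsk_buff_pool *xsk_pool;
    };

    static void my_xsk_tx_wakeup_update(struct my_xsk_sq *sq, bool more_napi_work)
    {
        if (!xsk_uses_need_wakeup(sq->xsk_pool))
            return;

        /* While NAPI is going to run again anyway, user space need not
         * kick TX; once the driver goes idle, set the flag so the
         * application knows to call sendto() when it queues new frames. */
        if (more_napi_work)
            xsk_clear_tx_need_wakeup(sq->xsk_pool);
        else
            xsk_set_tx_need_wakeup(sq->xsk_pool);
    }
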
/Linux-v6.1/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c 114 if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) in ixgbe_run_xdp_zc()
168 bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); in ixgbe_alloc_rx_buffers_zc()
307 xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); in ixgbe_clean_rx_irq_zc()
369 if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { in ixgbe_clean_rx_irq_zc()
371 xsk_set_rx_need_wakeup(rx_ring->xsk_pool); in ixgbe_clean_rx_irq_zc()
373 xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); in ixgbe_clean_rx_irq_zc()
398 struct xsk_buff_pool *pool = xdp_ring->xsk_pool; in ixgbe_xmit_zc()
466 struct xsk_buff_pool *pool = tx_ring->xsk_pool; in ixgbe_clean_xdp_tx_irq()
538 if (!ring->xsk_pool) in ixgbe_xsk_wakeup()
553 struct xsk_buff_pool *pool = tx_ring->xsk_pool; in ixgbe_xsk_clean_tx_ring()
/Linux-v6.1/drivers/net/ethernet/netronome/nfp/
nfp_net_xsk.c 22 headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool); in nfp_net_xsk_rx_bufs_stash()
60 struct xsk_buff_pool *pool = r_vec->xsk_pool; in nfp_net_xsk_rx_ring_fill_freelist()
nfp_net_debugfs.c 46 if (!r_vec->xsk_pool) { in nfp_rx_q_show()
nfp_net_dp.c 108 if (rx_ring->r_vec->xsk_pool) { in nfp_net_rx_ring_reset()
/Linux-v6.1/drivers/net/ethernet/stmicro/stmmac/
stmmac_main.c 238 if (rx_q->xsk_pool) { in stmmac_disable_all_queues()
1611 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1656 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1658 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1665 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1675 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1726 if (rx_q->xsk_pool) in init_dma_rx_desc_rings()
1732 rx_q->xsk_pool = NULL; in init_dma_rx_desc_rings()
1772 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_tx_desc_rings()
1859 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { in dma_free_tx_skbufs()
[all …]
stmmac.h 73 struct xsk_buff_pool *xsk_pool; member
99 struct xsk_buff_pool *xsk_pool; member
/Linux-v6.1/drivers/net/ethernet/intel/igc/
igc_main.c 252 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
253 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
431 if (ring->xsk_pool) in igc_clean_rx_ring()
603 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_rx_ring()
604 if (ring->xsk_pool) { in igc_configure_rx_ring()
608 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igc_configure_rx_ring()
637 if (ring->xsk_pool) in igc_configure_rx_ring()
638 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igc_configure_rx_ring()
700 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_tx_ring()
2091 bi->xdp = xsk_buff_alloc(ring->xsk_pool); in igc_alloc_rx_buffers_zc()
[all …]
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c 568 xsk_tx_completed(sq->xsk_pool, xsk_frames); in mlx5e_poll_xdpsq_cq()
606 xsk_tx_completed(sq->xsk_pool, xsk_frames); in mlx5e_free_xdpsq_descs()
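
These xdp.c hits show completion accounting: while the XDP send queue's completions are processed, frames that originated from the AF_XDP TX ring are counted and then reported back to the pool in one xsk_tx_completed() call. A sketch of that step; the completed_is_xsk input is a hypothetical stand-in for the per-frame bookkeeping a real driver keeps in its descriptor info.

    #include <net/xdp_sock_drv.h>

    static void my_xdpsq_report_completions(struct xsk_buff_pool *pool,
                                            const bool *completed_is_xsk,
                                            u32 n_completed)
    {
        u32 xsk_frames = 0;
        u32 i;

        for (i = 0; i < n_completed; i++) {
            /* Frames redirected from elsewhere are freed through their
             * own path; only pool-backed frames are counted here. */
            if (completed_is_xsk[i])
                xsk_frames++;
        }

        if (xsk_frames)
            /* Publish the finished frames on the completion ring so
             * user space can reuse the underlying umem chunks. */
            xsk_tx_completed(pool, xsk_frames);
    }
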
