/Linux-v6.1/drivers/net/ethernet/intel/i40e/ |
D | i40e_xsk.c |
    214  if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)  in i40e_run_xdp_zc()
    256  nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);  in i40e_alloc_rx_buffers_zc()
    433  xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);  in i40e_clean_rx_irq_zc()
    455  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {  in i40e_clean_rx_irq_zc()
    457  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);  in i40e_clean_rx_irq_zc()
    459  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);  in i40e_clean_rx_irq_zc()
    472  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);  in i40e_xmit_pkt()
    473  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);  in i40e_xmit_pkt()
    492  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);  in i40e_xmit_pkt_batch()
    493  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);  in i40e_xmit_pkt_batch()
    [all …]
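The i40e hits at 455-459 are the RX-side need_wakeup handshake that several drivers below repeat. A minimal sketch of that pattern, assuming a hypothetical driver context (the function name and "failure" flag are stand-ins; only the xsk_* helpers are real v6.1 API from include/net/xdp_sock_drv.h):

	#include <net/xdp_sock_drv.h>

	/* End of a zero-copy RX pass: "failure" means the refill ran out of
	 * fill-ring entries, as in i40e_clean_rx_irq_zc().
	 */
	static void my_rx_irq_done(struct xsk_buff_pool *pool, bool failure)
	{
		if (xsk_uses_need_wakeup(pool)) {
			if (failure)
				/* Ask user space to kick us via poll()/sendto(). */
				xsk_set_rx_need_wakeup(pool);
			else
				xsk_clear_rx_need_wakeup(pool);
		}
	}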
|
D | i40e_txrx.h | 395 struct xsk_buff_pool *xsk_pool; member
|
D | i40e_txrx.c |
    793   if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {  in i40e_clean_tx_ring()
    1480  if (rx_ring->xsk_pool) {  in i40e_clean_rx_ring()
    1514  if (rx_ring->xsk_pool)  in i40e_clean_rx_ring()
    2706  bool wd = ring->xsk_pool ?  in i40e_napi_poll()
    2734  int cleaned = ring->xsk_pool ?  in i40e_napi_poll()
|
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
D | rx.c |
    21   if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))  in mlx5e_xsk_alloc_rx_mpwqe()
    25   batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,  in mlx5e_xsk_alloc_rx_mpwqe()
    35   wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool);  in mlx5e_xsk_alloc_rx_mpwqe()
    86   rq->xsk_pool->chunk_size);  in mlx5e_xsk_alloc_rx_mpwqe()
    87   __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);  in mlx5e_xsk_alloc_rx_mpwqe()
    158  alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);  in mlx5e_xsk_alloc_rx_wqes_batched()
    160  alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig);  in mlx5e_xsk_alloc_rx_wqes_batched()
    162  alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig);  in mlx5e_xsk_alloc_rx_wqes_batched()
    197  frag->au->xsk = xsk_buff_alloc(rq->xsk_pool);  in mlx5e_xsk_alloc_rx_wqes()
    253  xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);  in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
    [all …]
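The mlx5 matches at 21-35 show batched RX allocation with a capacity check and a single-buffer top-up. A hedged sketch of that shape, with a hypothetical refill helper (only the xsk_buff_* calls are real v6.1 API):

	#include <net/xdp_sock_drv.h>

	/* Grab 'want' RX buffers in one batch, then top up one at a time,
	 * loosely mirroring mlx5e_xsk_alloc_rx_mpwqe().
	 */
	static int my_refill(struct xsk_buff_pool *pool, struct xdp_buff **bufs,
			     u32 want)
	{
		u32 got;

		if (unlikely(!xsk_buff_can_alloc(pool, want)))
			return -ENOMEM;

		/* May still return fewer than 'want' buffers. */
		got = xsk_buff_alloc_batch(pool, bufs, want);
		for (; got < want; got++) {
			bufs[got] = xsk_buff_alloc(pool);
			if (unlikely(!bufs[got]))
				return -ENOMEM;
		}
		return 0;
	}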
|
D | tx.c | 63 struct xsk_buff_pool *pool = sq->xsk_pool; in mlx5e_xsk_tx()
|
D | setup.c | 66 rq->xsk_pool = pool; in mlx5e_init_xsk_rq()
|
/Linux-v6.1/drivers/net/ethernet/netronome/nfp/nfd3/ |
D | xsk.c |
    21   struct xsk_buff_pool *pool = r_vec->xsk_pool;  in nfp_nfd3_xsk_tx_xdp()
    187  xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);  in nfp_nfd3_xsk_rx()
    324  xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);  in nfp_nfd3_xsk_complete()
    337  struct xsk_buff_pool *xsk_pool;  in nfp_nfd3_xsk_tx()  local
    342  xsk_pool = r_vec->xsk_pool;  in nfp_nfd3_xsk_tx()
    346  if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))  in nfp_nfd3_xsk_tx()
    356  xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,  in nfp_nfd3_xsk_tx()
    368  xsk_buff_raw_get_dma(xsk_pool, desc[i].addr));  in nfp_nfd3_xsk_tx()
    381  xsk_tx_release(xsk_pool);  in nfp_nfd3_xsk_tx()
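nfp_nfd3_xsk_tx() and nfp_nfd3_xsk_complete() together show the zero-copy TX lifecycle: peek descriptors, sync for device, release the TX ring, and later report completions. A minimal sketch under the same assumptions as above (hypothetical driver functions, real xsk_* API):

	#include <net/xdp_sock_drv.h>

	/* Drain up to 'budget' descriptors from the XSK TX ring. */
	static void my_xsk_tx(struct xsk_buff_pool *pool, u32 budget)
	{
		struct xdp_desc desc;
		dma_addr_t dma;
		u32 sent = 0;

		while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
			dma = xsk_buff_raw_get_dma(pool, desc.addr);
			xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
			/* ...post dma/desc.len to the hardware TX ring... */
			sent++;
		}
		if (sent)
			/* Publish the consumed TX descriptors to user space. */
			xsk_tx_release(pool);
	}

	/* From the completion path, once hardware has sent 'done' frames. */
	static void my_xsk_tx_complete(struct xsk_buff_pool *pool, u32 done)
	{
		if (done)
			xsk_tx_completed(pool, done);
	}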
|
D | rings.c |
    25  if (tx_ring->r_vec->xsk_pool) {  in nfp_nfd3_xsk_tx_bufs_free()
    29  xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);  in nfp_nfd3_xsk_tx_bufs_free()
|
/Linux-v6.1/drivers/net/ethernet/intel/ice/ |
D | ice_xsk.c |
    490  nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,  in __ice_alloc_rx_bufs_zc()
    504  nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);  in __ice_alloc_rx_bufs_zc()
    611  if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)  in ice_run_xdp_zc()
    701  xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);  in ice_clean_rx_irq_zc()
    755  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {  in ice_clean_rx_irq_zc()
    757  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);  in ice_clean_rx_irq_zc()
    759  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);  in ice_clean_rx_irq_zc()
    833  xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);  in ice_clean_xdp_irq_zc()
    848  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);  in ice_xmit_pkt()
    849  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);  in ice_xmit_pkt()
    [all …]
|
D | ice_base.c |
    440  if (ring->xsk_pool)  in ice_setup_rx_ctx()
    509  ring->xsk_pool = ice_xsk_pool(ring);  in ice_vsi_cfg_rxq()
    510  if (ring->xsk_pool) {  in ice_vsi_cfg_rxq()
    514  xsk_pool_get_rx_frame_size(ring->xsk_pool);  in ice_vsi_cfg_rxq()
    520  xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);  in ice_vsi_cfg_rxq()
    546  if (ring->xsk_pool) {  in ice_vsi_cfg_rxq()
    549  if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {  in ice_vsi_cfg_rxq()
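ice_vsi_cfg_rxq() (together with ice_tx_xsk_pool() in ice.h below) shows the queue bring-up side: look up the pool bound to a queue id, size the hardware buffers from the pool, and attach the pool to the queue's xdp_rxq_info. A sketch in that shape; the function and parameter names are hypothetical, the xsk_* calls are real v6.1 API:

	#include <linux/netdevice.h>
	#include <net/xdp_sock_drv.h>

	/* Returns the HW RX buffer length to program, or 0 for copy mode. */
	static u32 my_cfg_rxq(struct net_device *netdev, u16 qid,
			      struct xdp_rxq_info *xdp_rxq, u32 num_bufs)
	{
		struct xsk_buff_pool *pool;

		/* NULL unless an AF_XDP socket is bound to this queue id. */
		pool = xsk_get_pool_from_qid(netdev, qid);
		if (!pool)
			return 0;

		xsk_pool_set_rxq_info(pool, xdp_rxq);

		/* Warn early if the fill ring cannot populate the whole ring. */
		if (!xsk_buff_can_alloc(pool, num_bufs))
			netdev_warn(netdev, "XSK fill queue under-populated on qid %u\n",
				    qid);

		/* HW buffer size must match the pool's chunk layout. */
		return xsk_pool_get_rx_frame_size(pool);
	}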
|
D | ice_txrx.h |
    295  struct xsk_buff_pool *xsk_pool;  member
    318  struct xsk_buff_pool *xsk_pool;  member
|
D | ice_txrx.c |
    154   if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {  in ice_clean_tx_ring()
    398   if (rx_ring->xsk_pool) {  in ice_clean_rx_ring()
    428   if (rx_ring->xsk_pool)  in ice_clean_rx_ring()
    458   if (rx_ring->xsk_pool) {  in ice_free_rx_ring()
    1469  if (tx_ring->xsk_pool)  in ice_napi_poll()
    1502  cleaned = rx_ring->xsk_pool ?  in ice_napi_poll()
|
D | ice.h |
    724  ring->xsk_pool = NULL;  in ice_tx_xsk_pool()
    728  ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);  in ice_tx_xsk_pool()
|
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_txrx.c |
    90   bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool);  in mlx5e_napi_xsk_post()
    98   xsk_set_tx_need_wakeup(xsksq->xsk_pool);  in mlx5e_napi_xsk_post()
    102  xsk_clear_tx_need_wakeup(xsksq->xsk_pool);  in mlx5e_napi_xsk_post()
    108  xsk_set_rx_need_wakeup(xskrq->xsk_pool);  in mlx5e_napi_xsk_post()
    117  xsk_set_rx_need_wakeup(xskrq->xsk_pool);  in mlx5e_napi_xsk_post()
    119  xsk_clear_rx_need_wakeup(xskrq->xsk_pool);  in mlx5e_napi_xsk_post()
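mlx5e_napi_xsk_post() arms both directions at the end of NAPI polling; the TX side complements the RX handshake sketched for i40e above. A hedged sketch with a hypothetical "drained" flag (real xsk_* API):

	#include <net/xdp_sock_drv.h>

	/* End-of-NAPI hook: arm the TX wakeup flag when the XSK TX ring was
	 * fully drained, clear it while work remains so user space need not
	 * issue wakeup syscalls.
	 */
	static void my_napi_xsk_post(struct xsk_buff_pool *pool, bool drained)
	{
		if (!xsk_uses_need_wakeup(pool))
			return;
		if (drained)
			xsk_set_tx_need_wakeup(pool);
		else
			xsk_clear_tx_need_wakeup(pool);
	}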
|
D | en_main.c |
    453   u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;  in mlx5e_create_rq_umr_mkey()
    496   if (rq->xsk_pool) {  in mlx5e_init_frags_partition()
    771   xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);  in mlx5e_alloc_rq()
    1286  struct xsk_buff_pool *xsk_pool,  in mlx5e_alloc_xdpsq()  argument
    1302  sq->xsk_pool = xsk_pool;  in mlx5e_alloc_xdpsq()
    1304  sq->stats = sq->xsk_pool ?  in mlx5e_alloc_xdpsq()
    1791  struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,  in mlx5e_open_xdpsq()  argument
    1797  err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);  in mlx5e_open_xdpsq()
    2397  struct xsk_buff_pool *xsk_pool,  in mlx5e_open_channel()  argument
    2439  if (xsk_pool) {  in mlx5e_open_channel()
    [all …]
|
D | en.h |
    543   struct xsk_buff_pool *xsk_pool;  member
    754   struct xsk_buff_pool *xsk_pool;  member
    1050  struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
|
D | en_rx.c |
    384  if (rq->xsk_pool) {  in mlx5e_free_rx_wqe()
    463  if (rq->xsk_pool) {  in mlx5e_free_rx_mpwqe()
    782  if (!rq->xsk_pool)  in mlx5e_post_rx_wqes()
    784  else if (likely(!rq->xsk_pool->dma_need_sync))  in mlx5e_post_rx_wqes()
    973  alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :  in mlx5e_post_rx_mpwqes()
    996  if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))  in mlx5e_post_rx_mpwqes()
|
/Linux-v6.1/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
    114  if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)  in ixgbe_run_xdp_zc()
    168  bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);  in ixgbe_alloc_rx_buffers_zc()
    307  xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);  in ixgbe_clean_rx_irq_zc()
    369  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {  in ixgbe_clean_rx_irq_zc()
    371  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);  in ixgbe_clean_rx_irq_zc()
    373  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);  in ixgbe_clean_rx_irq_zc()
    398  struct xsk_buff_pool *pool = xdp_ring->xsk_pool;  in ixgbe_xmit_zc()
    466  struct xsk_buff_pool *pool = tx_ring->xsk_pool;  in ixgbe_clean_xdp_tx_irq()
    538  if (!ring->xsk_pool)  in ixgbe_xsk_wakeup()
    553  struct xsk_buff_pool *pool = tx_ring->xsk_pool;  in ixgbe_xsk_clean_tx_ring()
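The ixgbe hit at 307 shows the per-frame RX step common to every driver here: trim the buffer to the received length and sync it for CPU access before the XDP program touches it. A minimal sketch with a hypothetical drop flag standing in for the XDP verdict handling (real xsk_* API):

	#include <net/xdp_sock_drv.h>

	static void my_rx_one(struct xsk_buff_pool *pool, struct xdp_buff *xdp,
			      u32 size, bool drop)
	{
		/* Trim to the received length, then sync before XDP runs. */
		xdp->data_end = xdp->data + size;
		xsk_buff_dma_sync_for_cpu(xdp, pool);

		/* ...run the XDP program here and act on its verdict... */

		if (drop)
			xsk_buff_free(xdp);	/* return the chunk to the pool */
	}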
|
/Linux-v6.1/drivers/net/ethernet/netronome/nfp/ |
D | nfp_net_xsk.c |
    22  headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);  in nfp_net_xsk_rx_bufs_stash()
    60  struct xsk_buff_pool *pool = r_vec->xsk_pool;  in nfp_net_xsk_rx_ring_fill_freelist()
|
D | nfp_net_debugfs.c | 46 if (!r_vec->xsk_pool) { in nfp_rx_q_show()
|
D | nfp_net_dp.c | 108 if (rx_ring->r_vec->xsk_pool) { in nfp_net_rx_ring_reset()
|
/Linux-v6.1/drivers/net/ethernet/stmicro/stmmac/ |
D | stmmac_main.c |
    238   if (rx_q->xsk_pool) {  in stmmac_disable_all_queues()
    1611  buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);  in stmmac_alloc_rx_buffers_zc()
    1656  rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);  in __init_dma_rx_desc_rings()
    1658  if (rx_q->xsk_pool) {  in __init_dma_rx_desc_rings()
    1665  xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);  in __init_dma_rx_desc_rings()
    1675  if (rx_q->xsk_pool) {  in __init_dma_rx_desc_rings()
    1726  if (rx_q->xsk_pool)  in init_dma_rx_desc_rings()
    1732  rx_q->xsk_pool = NULL;  in init_dma_rx_desc_rings()
    1772  tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);  in __init_dma_tx_desc_rings()
    1859  if (tx_q->xsk_pool && tx_q->xsk_frames_done) {  in dma_free_tx_skbufs()
    [all …]
|
D | stmmac.h | 73 struct xsk_buff_pool *xsk_pool; member 99 struct xsk_buff_pool *xsk_pool; member
|
/Linux-v6.1/drivers/net/ethernet/intel/igc/ |
D | igc_main.c |
    252   if (tx_ring->xsk_pool && xsk_frames)  in igc_clean_tx_ring()
    253   xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);  in igc_clean_tx_ring()
    431   if (ring->xsk_pool)  in igc_clean_rx_ring()
    603   ring->xsk_pool = igc_get_xsk_pool(adapter, ring);  in igc_configure_rx_ring()
    604   if (ring->xsk_pool) {  in igc_configure_rx_ring()
    608   xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);  in igc_configure_rx_ring()
    637   if (ring->xsk_pool)  in igc_configure_rx_ring()
    638   buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);  in igc_configure_rx_ring()
    700   ring->xsk_pool = igc_get_xsk_pool(adapter, ring);  in igc_configure_tx_ring()
    2091  bi->xdp = xsk_buff_alloc(ring->xsk_pool);  in igc_alloc_rx_buffers_zc()
    [all …]
|
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | xdp.c |
    568  xsk_tx_completed(sq->xsk_pool, xsk_frames);  in mlx5e_poll_xdpsq_cq()
    606  xsk_tx_completed(sq->xsk_pool, xsk_frames);  in mlx5e_free_xdpsq_descs()
|