Lines Matching +full:dma +full:- +full:pool
1 // SPDX-License-Identifier: GPL-2.0
15 memset(rx_ring->rx_bi_zc, 0, in i40e_clear_rx_bi_zc()
16 sizeof(*rx_ring->rx_bi_zc) * rx_ring->count); in i40e_clear_rx_bi_zc()
21 return &rx_ring->rx_bi_zc[idx]; in i40e_rx_bi()
25 * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
27 * @pool_present: is pool for XSK present
35 size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) : in i40e_realloc_rx_xdp_bi()
36 sizeof(*rx_ring->rx_bi); in i40e_realloc_rx_xdp_bi()
37 void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); in i40e_realloc_rx_xdp_bi()
40 return -ENOMEM; in i40e_realloc_rx_xdp_bi()
43 kfree(rx_ring->rx_bi); in i40e_realloc_rx_xdp_bi()
44 rx_ring->rx_bi = NULL; in i40e_realloc_rx_xdp_bi()
45 rx_ring->rx_bi_zc = sw_ring; in i40e_realloc_rx_xdp_bi()
47 kfree(rx_ring->rx_bi_zc); in i40e_realloc_rx_xdp_bi()
48 rx_ring->rx_bi_zc = NULL; in i40e_realloc_rx_xdp_bi()
49 rx_ring->rx_bi = sw_ring; in i40e_realloc_rx_xdp_bi()
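
The swap above never holds both software rings at once: the new ring is allocated first, and only on success is the other ring freed and the pointer switched. Below is a minimal userspace sketch of the same pattern, with calloc/free standing in for kcalloc/kfree and illustrative struct names rather than the driver's real types.

#include <stdlib.h>

struct rx_buf    { void *page; };  /* stand-in for a normal rx_bi entry */
struct rx_buf_zc { void *xdp; };   /* stand-in for a zero-copy rx_bi_zc entry */

struct rx_ring_model {
	unsigned int count;
	struct rx_buf    *rx_bi;     /* used when no XSK pool is attached */
	struct rx_buf_zc *rx_bi_zc;  /* used when an XSK pool is attached */
};

/* Allocate the ring for the requested mode first, then free the other one,
 * so a failed allocation leaves the current ring untouched. */
static int realloc_rx_sw_ring(struct rx_ring_model *r, int pool_present)
{
	size_t elem_size = pool_present ? sizeof(*r->rx_bi_zc) : sizeof(*r->rx_bi);
	void *sw_ring = calloc(r->count, elem_size);

	if (!sw_ring)
		return -1;

	if (pool_present) {
		free(r->rx_bi);
		r->rx_bi = NULL;
		r->rx_bi_zc = sw_ring;
	} else {
		free(r->rx_bi_zc);
		r->rx_bi_zc = NULL;
		r->rx_bi = sw_ring;
	}
	return 0;
}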
55 * i40e_realloc_rx_bi_zc - reallocate rx SW rings
68 for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) { in i40e_realloc_rx_bi_zc()
69 rx_ring = vsi->rx_rings[q]; in i40e_realloc_rx_bi_zc()
71 return -ENOMEM; in i40e_realloc_rx_bi_zc()
77 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
80 * @pool: buffer pool
81 * @qid: Rx ring to associate buffer pool with
86 struct xsk_buff_pool *pool, in i40e_xsk_pool_enable() argument
89 struct net_device *netdev = vsi->netdev; in i40e_xsk_pool_enable()
93 if (vsi->type != I40E_VSI_MAIN) in i40e_xsk_pool_enable()
94 return -EINVAL; in i40e_xsk_pool_enable()
96 if (qid >= vsi->num_queue_pairs) in i40e_xsk_pool_enable()
97 return -EINVAL; in i40e_xsk_pool_enable()
99 if (qid >= netdev->real_num_rx_queues || in i40e_xsk_pool_enable()
100 qid >= netdev->real_num_tx_queues) in i40e_xsk_pool_enable()
101 return -EINVAL; in i40e_xsk_pool_enable()
103 err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR); in i40e_xsk_pool_enable()
107 set_bit(qid, vsi->af_xdp_zc_qps); in i40e_xsk_pool_enable()
109 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); in i40e_xsk_pool_enable()
116 err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); in i40e_xsk_pool_enable()
125 err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); in i40e_xsk_pool_enable()
134 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
137 * @qid: Rx ring to disassociate buffer pool from
143 struct net_device *netdev = vsi->netdev; in i40e_xsk_pool_disable()
144 struct xsk_buff_pool *pool; in i40e_xsk_pool_disable() local
148 pool = xsk_get_pool_from_qid(netdev, qid); in i40e_xsk_pool_disable()
149 if (!pool) in i40e_xsk_pool_disable()
150 return -EINVAL; in i40e_xsk_pool_disable()
152 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); in i40e_xsk_pool_disable()
160 clear_bit(qid, vsi->af_xdp_zc_qps); in i40e_xsk_pool_disable()
161 xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR); in i40e_xsk_pool_disable()
164 err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false); in i40e_xsk_pool_disable()
176 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
179 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
180 * @qid: Rx ring to (dis)associate buffer pool to/from
182 * This function enables or disables a buffer pool on a certain ring/qid.
186 int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool, in i40e_xsk_pool_setup() argument
189 return pool ? i40e_xsk_pool_enable(vsi, pool, qid) : in i40e_xsk_pool_setup()
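
For context on where the pool argument comes from: the AF_XDP core invokes this setup path with a non-NULL pool when a socket is bound to queue qid, and with NULL when that socket goes away. A hedged userspace sketch using the generic libxdp xsk helpers follows; the interface name, queue index, frame count and the omitted error handling are all illustrative, not anything the driver dictates.

#include <stdlib.h>
#include <unistd.h>
#include <xdp/xsk.h>   /* libxdp; older setups ship the same API as <bpf/xsk.h> */

#define NUM_FRAMES 4096
#define FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE

int main(void)
{
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_umem *umem;
	struct xsk_socket *xsk;
	size_t size = (size_t)NUM_FRAMES * FRAME_SIZE;
	void *bufs;

	if (posix_memalign(&bufs, getpagesize(), size))
		return 1;

	/* Registering the UMEM and binding the socket to ("eth0", queue 0) is
	 * what eventually reaches the driver's XDP_SETUP_XSK_POOL handler with
	 * a non-NULL pool. */
	if (xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL))
		return 1;
	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
		return 1;

	/* ... run zero-copy traffic here ... */

	/* Tearing the socket down takes the disable path (pool == NULL). */
	xsk_socket__delete(xsk);
	xsk_umem__delete(umem);
	free(bufs);
	return 0;
}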
194 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
211 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in i40e_run_xdp_zc()
214 if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) in i40e_run_xdp_zc()
225 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp_zc()
234 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp_zc()
239 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp_zc()
246 u16 ntu = rx_ring->next_to_use; in i40e_alloc_rx_buffers_zc()
250 dma_addr_t dma; in i40e_alloc_rx_buffers_zc() local
255 nb_buffs = min_t(u16, count, rx_ring->count - ntu); in i40e_alloc_rx_buffers_zc()
256 nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); in i40e_alloc_rx_buffers_zc()
261 while (i--) { in i40e_alloc_rx_buffers_zc()
262 dma = xsk_buff_xdp_get_dma(*xdp); in i40e_alloc_rx_buffers_zc()
263 rx_desc->read.pkt_addr = cpu_to_le64(dma); in i40e_alloc_rx_buffers_zc()
264 rx_desc->read.hdr_addr = 0; in i40e_alloc_rx_buffers_zc()
271 if (ntu == rx_ring->count) { in i40e_alloc_rx_buffers_zc()
277 rx_desc->wb.qword1.status_error_len = 0; in i40e_alloc_rx_buffers_zc()
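
The refill fragment above caps each batch at rx_ring->count - ntu, so a single xsk_buff_alloc_batch() call never crosses the end of the descriptor ring, and the tail index wraps back to 0 once it reaches count. A small standalone model of that capping follows, with a plain array standing in for the descriptor ring; the names and sizes are illustrative.

#include <stdio.h>

#define RING_SIZE 16	/* illustrative; real rings are much larger */

/* Fill up to 'count' slots starting at *ntu, but never past the ring end. */
static unsigned int fill_capped(unsigned long long *ring, unsigned int *ntu,
				unsigned int count)
{
	unsigned int room = RING_SIZE - *ntu;
	unsigned int nb = count < room ? count : room;

	for (unsigned int i = 0; i < nb; i++)
		ring[*ntu + i] = 0xabcd;	/* stands in for writing pkt_addr */

	*ntu += nb;
	if (*ntu == RING_SIZE)
		*ntu = 0;			/* wrap, as the driver does */
	return nb;
}

int main(void)
{
	unsigned long long ring[RING_SIZE] = { 0 };
	unsigned int ntu = 14;

	/* Asking for 8 buffers near the end only fills 2; the caller comes
	 * back for the rest on a later pass. */
	printf("filled %u, ntu now %u\n", fill_capped(ring, &ntu, 8), ntu);
	return 0;
}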
284 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
288 * This function allocates a new skb from a zero-copy Rx buffer.
295 unsigned int totalsize = xdp->data_end - xdp->data_meta; in i40e_construct_skb_zc()
296 unsigned int metasize = xdp->data - xdp->data_meta; in i40e_construct_skb_zc()
299 net_prefetch(xdp->data_meta); in i40e_construct_skb_zc()
302 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, in i40e_construct_skb_zc()
307 memcpy(__skb_put(skb, totalsize), xdp->data_meta, in i40e_construct_skb_zc()
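
The construct-skb path copies metadata and payload in one memcpy: everything from data_meta up to data_end goes into the freshly allocated skb, and metasize records how many of those leading bytes are XDP metadata. A standalone sketch of just the offset arithmetic, using plain buffers and made-up offsets instead of a real xdp_buff:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char frame[2048];
	/* Illustrative layout within one zero-copy Rx buffer: */
	unsigned char *data_meta = frame + 248;  /* XDP metadata starts here */
	unsigned char *data      = frame + 256;  /* packet data starts here */
	unsigned char *data_end  = frame + 1756; /* end of packet data */

	size_t totalsize = (size_t)(data_end - data_meta); /* metadata + payload */
	size_t metasize  = (size_t)(data - data_meta);     /* metadata only */

	unsigned char skb_copy[2048];
	memcpy(skb_copy, data_meta, totalsize); /* one copy covers both parts */

	printf("total %zu bytes, of which %zu are metadata\n", totalsize, metasize);
	return 0;
}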
354 rx_ring->rx_stats.alloc_buff_failed++; in i40e_handle_xdp_result_zc()
366 *rx_bytes = skb->len; in i40e_handle_xdp_result_zc()
368 napi_gro_receive(&rx_ring->q_vector->napi, skb); in i40e_handle_xdp_result_zc()
378 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
387 u16 next_to_clean = rx_ring->next_to_clean; in i40e_clean_rx_irq_zc()
388 u16 count_mask = rx_ring->count - 1; in i40e_clean_rx_irq_zc()
397 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in i40e_clean_rx_irq_zc()
408 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_clean_rx_irq_zc()
418 rx_desc->raw.qword[0], in i40e_clean_rx_irq_zc()
433 xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
446 rx_ring->next_to_clean = next_to_clean; in i40e_clean_rx_irq_zc()
447 cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask; in i40e_clean_rx_irq_zc()
455 if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { in i40e_clean_rx_irq_zc()
456 if (failure || next_to_clean == rx_ring->next_to_use) in i40e_clean_rx_irq_zc()
457 xsk_set_rx_need_wakeup(rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
459 xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); in i40e_clean_rx_irq_zc()
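
Throughout this file the ring size is a power of two, so count - 1 doubles as a wrap-around mask; (next_to_clean - next_to_use - 1) & count_mask therefore yields the number of refillable slots even when the 16-bit subtraction wraps. A tiny standalone check of that arithmetic with illustrative values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t count = 512;            /* ring sizes are powers of two */
	const uint16_t count_mask = count - 1;
	uint16_t ntc, ntu, cleaned;

	/* No wrap: next_to_clean is ahead of next_to_use. */
	ntc = 300; ntu = 100;
	cleaned = (uint16_t)(ntc - ntu - 1) & count_mask;
	assert(cleaned == 199);

	/* Wrap: next_to_clean has wrapped past the end, next_to_use has not. */
	ntc = 10; ntu = 500;
	cleaned = (uint16_t)(ntc - ntu - 1) & count_mask;
	assert(cleaned == 21);

	printf("wrapped case: %u slots can be refilled\n", (unsigned int)cleaned);
	return 0;
}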
470 dma_addr_t dma; in i40e_xmit_pkt() local
472 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); in i40e_xmit_pkt()
473 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); in i40e_xmit_pkt()
475 tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++); in i40e_xmit_pkt()
476 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_xmit_pkt()
477 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP, in i40e_xmit_pkt()
478 0, desc->len, 0); in i40e_xmit_pkt()
480 *total_bytes += desc->len; in i40e_xmit_pkt()
486 u16 ntu = xdp_ring->next_to_use; in i40e_xmit_pkt_batch()
488 dma_addr_t dma; in i40e_xmit_pkt_batch() local
492 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr); in i40e_xmit_pkt_batch()
493 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len); in i40e_xmit_pkt_batch()
496 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_xmit_pkt_batch()
497 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | in i40e_xmit_pkt_batch()
504 xdp_ring->next_to_use = ntu; in i40e_xmit_pkt_batch()
512 batched = nb_pkts & ~(PKTS_PER_BATCH - 1); in i40e_fill_tx_hw_ring()
513 leftover = nb_pkts & (PKTS_PER_BATCH - 1); in i40e_fill_tx_hw_ring()
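
Assuming PKTS_PER_BATCH is a power of two (4 is assumed in the sketch below), the two masks split the peeked descriptors into full unrolled batches plus a remainder that is sent one packet at a time. A quick standalone check with an illustrative packet count:

#include <stdio.h>

#define PKTS_PER_BATCH 4	/* power of two; assumed to match the driver's value */

int main(void)
{
	unsigned int nb_pkts = 27;
	unsigned int batched  = nb_pkts & ~(PKTS_PER_BATCH - 1); /* 24: six full batches */
	unsigned int leftover = nb_pkts & (PKTS_PER_BATCH - 1);  /* 3: sent one at a time */

	printf("%u packets -> %u batched + %u leftover\n", nb_pkts, batched, leftover);
	return 0;
}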
522 u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1; in i40e_set_rs_bit()
526 tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT); in i40e_set_rs_bit()
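
The RS (report status) bit goes on the last descriptor actually written, i.e. the slot just before next_to_use, and the ternary picks the final ring entry when next_to_use has just wrapped to 0. The index expression is easy to sanity-check in isolation (values are illustrative):

#include <assert.h>

int main(void)
{
	const unsigned int count = 512;	/* ring size */
	unsigned int ntu;

	ntu = 5;
	assert((ntu ? ntu - 1 : count - 1) == 4);   /* normal case: previous slot */
	ntu = 0;
	assert((ntu ? ntu - 1 : count - 1) == 511); /* just wrapped: last ring slot */
	return 0;
}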
530 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
538 struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs; in i40e_xmit_zc()
542 nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget); in i40e_xmit_zc()
546 if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { in i40e_xmit_zc()
547 nb_processed = xdp_ring->count - xdp_ring->next_to_use; in i40e_xmit_zc()
549 xdp_ring->next_to_use = 0; in i40e_xmit_zc()
552 i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed, in i40e_xmit_zc()
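
Unlike the Rx refill, which caps a batch at the ring end, the Tx path splits one peeked batch into two contiguous fills around the wrap point. A standalone model of that split, with plain arrays in place of descriptors (names and sizes are illustrative):

#include <stdio.h>

#define RING_SIZE 16	/* illustrative */

static void fill(unsigned int *ring, unsigned int ntu, unsigned int n,
		 const unsigned int *descs)
{
	for (unsigned int i = 0; i < n; i++)
		ring[ntu + i] = descs[i];	/* stands in for writing a Tx descriptor */
}

int main(void)
{
	unsigned int ring[RING_SIZE] = { 0 };
	unsigned int descs[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned int next_to_use = 12, nb_pkts = 8, nb_processed = 0;

	if (next_to_use + nb_pkts >= RING_SIZE) {
		nb_processed = RING_SIZE - next_to_use;   /* descriptors up to the end */
		fill(ring, next_to_use, nb_processed, descs);
		next_to_use = 0;                          /* wrap */
	}
	fill(ring, next_to_use, nb_pkts - nb_processed, &descs[nb_processed]);
	next_to_use += nb_pkts - nb_processed;

	printf("wrapped after %u, next_to_use now %u\n", nb_processed, next_to_use);
	return 0;
}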
565 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
572 xdp_return_frame(tx_bi->xdpf); in i40e_clean_xdp_tx_buffer()
573 tx_ring->xdp_tx_active--; in i40e_clean_xdp_tx_buffer()
574 dma_unmap_single(tx_ring->dev, in i40e_clean_xdp_tx_buffer()
575 dma_unmap_addr(tx_bi, dma), in i40e_clean_xdp_tx_buffer()
581 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
589 struct xsk_buff_pool *bp = tx_ring->xsk_pool; in i40e_clean_xdp_tx_irq()
595 if (head_idx < tx_ring->next_to_clean) in i40e_clean_xdp_tx_irq()
596 head_idx += tx_ring->count; in i40e_clean_xdp_tx_irq()
597 completed_frames = head_idx - tx_ring->next_to_clean; in i40e_clean_xdp_tx_irq()
602 if (likely(!tx_ring->xdp_tx_active)) { in i40e_clean_xdp_tx_irq()
607 ntc = tx_ring->next_to_clean; in i40e_clean_xdp_tx_irq()
610 tx_bi = &tx_ring->tx_bi[ntc]; in i40e_clean_xdp_tx_irq()
612 if (tx_bi->xdpf) { in i40e_clean_xdp_tx_irq()
614 tx_bi->xdpf = NULL; in i40e_clean_xdp_tx_irq()
619 if (++ntc >= tx_ring->count) in i40e_clean_xdp_tx_irq()
624 tx_ring->next_to_clean += completed_frames; in i40e_clean_xdp_tx_irq()
625 if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) in i40e_clean_xdp_tx_irq()
626 tx_ring->next_to_clean -= tx_ring->count; in i40e_clean_xdp_tx_irq()
634 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) in i40e_clean_xdp_tx_irq()
635 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); in i40e_clean_xdp_tx_irq()
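
Tx completion here is driven by the hardware head pointer rather than per-descriptor DD bits: if the reported head has already wrapped behind next_to_clean, adding count before subtracting gives the number of completed frames, and the software pointer is wrapped back into range afterwards. A standalone check of that arithmetic with illustrative values:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int count = 512;	/* ring size */
	unsigned int next_to_clean = 500;
	unsigned int head_idx = 20;	/* hardware head has already wrapped */

	if (head_idx < next_to_clean)
		head_idx += count;	/* undo the wrap before subtracting */
	unsigned int completed_frames = head_idx - next_to_clean;
	assert(completed_frames == 32);

	/* Advance the software pointer and wrap it back into [0, count). */
	next_to_clean += completed_frames;
	if (next_to_clean >= count)
		next_to_clean -= count;
	assert(next_to_clean == 20);

	printf("%u frames completed, next_to_clean now %u\n",
	       completed_frames, next_to_clean);
	return 0;
}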
641 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
651 struct i40e_vsi *vsi = np->vsi; in i40e_xsk_wakeup()
652 struct i40e_pf *pf = vsi->back; in i40e_xsk_wakeup()
655 if (test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_xsk_wakeup()
656 return -EAGAIN; in i40e_xsk_wakeup()
658 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_xsk_wakeup()
659 return -ENETDOWN; in i40e_xsk_wakeup()
662 return -EINVAL; in i40e_xsk_wakeup()
664 if (queue_id >= vsi->num_queue_pairs) in i40e_xsk_wakeup()
665 return -EINVAL; in i40e_xsk_wakeup()
667 if (!vsi->xdp_rings[queue_id]->xsk_pool) in i40e_xsk_wakeup()
668 return -EINVAL; in i40e_xsk_wakeup()
670 ring = vsi->xdp_rings[queue_id]; in i40e_xsk_wakeup()
678 if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) in i40e_xsk_wakeup()
679 i40e_force_wb(vsi, ring->q_vector); in i40e_xsk_wakeup()
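
The userspace counterpart of this ndo is worth keeping in mind: with the need_wakeup feature, an application only issues a syscall when the kernel has flagged the ring, and the conventional Tx kick is a zero-length sendto() on the XSK file descriptor, which ends up in handlers like the one above. A hedged sketch using the libxdp helpers; it assumes an already created socket and Tx ring, and the benign errno values follow the common sample-application pattern.

#include <errno.h>
#include <sys/socket.h>
#include <xdp/xsk.h>	/* libxdp; xsk_ring_prod__needs_wakeup lives here */

/* Kick Tx only when the kernel has set the need_wakeup flag on the Tx ring. */
static int kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (!xsk_ring_prod__needs_wakeup(tx))
		return 0;	/* NAPI is already running; no syscall required */

	/* A zero-length send on the XSK fd reaches the driver's ndo_xsk_wakeup. */
	if (sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0) < 0 &&
	    errno != EAGAIN && errno != EBUSY && errno != ENETDOWN)
		return -1;
	return 0;
}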
686 u16 count_mask = rx_ring->count - 1; in i40e_xsk_clean_rx_ring()
687 u16 ntc = rx_ring->next_to_clean; in i40e_xsk_clean_rx_ring()
688 u16 ntu = rx_ring->next_to_use; in i40e_xsk_clean_rx_ring()
698 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
703 u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; in i40e_xsk_clean_tx_ring()
704 struct xsk_buff_pool *bp = tx_ring->xsk_pool; in i40e_xsk_clean_tx_ring()
709 tx_bi = &tx_ring->tx_bi[ntc]; in i40e_xsk_clean_tx_ring()
711 if (tx_bi->xdpf) in i40e_xsk_clean_tx_ring()
716 tx_bi->xdpf = NULL; in i40e_xsk_clean_tx_ring()
719 if (ntc >= tx_ring->count) in i40e_xsk_clean_tx_ring()
728 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
729 * buffer pool attached
732 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
736 struct net_device *netdev = vsi->netdev; in i40e_xsk_any_rx_ring_enabled()
739 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_xsk_any_rx_ring_enabled()