Lines Matching +full:data +full:- +full:mapping (full-text search hits in drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c)
1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2016-2017 Broadcom Limited
27 dma_addr_t mapping, u32 len, in bnxt_xmit_bd() argument
40 num_frags = sinfo->nr_frags; in bnxt_xmit_bd()
44 prod = txr->tx_prod; in bnxt_xmit_bd()
45 tx_buf = &txr->tx_buf_ring[prod]; in bnxt_xmit_bd()
46 tx_buf->nr_frags = num_frags; in bnxt_xmit_bd()
48 tx_buf->page = virt_to_head_page(xdp->data); in bnxt_xmit_bd()
50 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; in bnxt_xmit_bd()
54 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); in bnxt_xmit_bd()
55 txbd->tx_bd_opaque = prod; in bnxt_xmit_bd()
56 txbd->tx_bd_haddr = cpu_to_le64(mapping); in bnxt_xmit_bd()
60 skb_frag_t *frag = &sinfo->frags[i]; in bnxt_xmit_bd()
62 struct pci_dev *pdev = bp->pdev; in bnxt_xmit_bd()
67 WRITE_ONCE(txr->tx_prod, prod); in bnxt_xmit_bd()
70 frag_tx_buf = &txr->tx_buf_ring[prod]; in bnxt_xmit_bd()
71 frag_tx_buf->page = skb_frag_page(frag); in bnxt_xmit_bd()
73 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; in bnxt_xmit_bd()
76 frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0, in bnxt_xmit_bd()
79 if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping))) in bnxt_xmit_bd()
82 dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping); in bnxt_xmit_bd()
85 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); in bnxt_xmit_bd()
86 txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); in bnxt_xmit_bd()
92 txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags | in bnxt_xmit_bd()
97 WRITE_ONCE(txr->tx_prod, prod); in bnxt_xmit_bd()
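
The matches above come from bnxt_xmit_bd(), which writes the TX buffer descriptors for an XDP frame: the head buffer reuses the RX page's existing DMA mapping, while each fragment in the xdp_buff's shared info gets a fresh streaming mapping via skb_frag_dma_map() before its descriptor is filled. Below is a minimal sketch of that per-fragment step, assuming the ring structures and the TX_RING()/TX_IDX()/NEXT_TX() helpers from bnxt.h; the helper name bnxt_xdp_map_frags() is hypothetical, and error unwinding plus the length/flags descriptor fields are omitted.

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>
    #include "bnxt.h"

    /* Hypothetical helper illustrating the per-fragment mapping seen above. */
    static int bnxt_xdp_map_frags(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                                  struct skb_shared_info *sinfo, u16 *prod)
    {
            struct pci_dev *pdev = bp->pdev;
            int i;

            for (i = 0; i < sinfo->nr_frags; i++) {
                    skb_frag_t *frag = &sinfo->frags[i];
                    struct bnxt_sw_tx_bd *frag_tx_buf;
                    dma_addr_t frag_mapping;
                    struct tx_bd *txbd;

                    *prod = NEXT_TX(*prod);
                    frag_tx_buf = &txr->tx_buf_ring[*prod];
                    frag_tx_buf->page = skb_frag_page(frag);

                    txbd = &txr->tx_desc_ring[TX_RING(*prod)][TX_IDX(*prod)];

                    frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
                                                    skb_frag_size(frag),
                                                    DMA_TO_DEVICE);
                    if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
                            return -ENOMEM;

                    /* Save the mapping so the completion path can unmap it. */
                    dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
                    txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
            }
            return 0;
    }
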
103 dma_addr_t mapping, u32 len, u16 rx_prod, in __bnxt_xmit_xdp() argument
108 tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp); in __bnxt_xmit_xdp()
109 tx_buf->rx_prod = rx_prod; in __bnxt_xmit_xdp()
110 tx_buf->action = XDP_TX; in __bnxt_xmit_xdp()
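
Taken together, these matches show essentially all of __bnxt_xmit_xdp(), the XDP_TX helper: the RX buffer's existing DMA mapping is handed straight to bnxt_xmit_bd(), and the RX producer index plus the XDP_TX action are recorded so the TX completion handler knows the page still belongs to the RX ring. A reconstruction, with the return type and struct names assumed from bnxt.h:

    static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                                dma_addr_t mapping, u32 len, u16 rx_prod,
                                struct xdp_buff *xdp)
    {
            struct bnxt_sw_tx_bd *tx_buf;

            tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
            tx_buf->rx_prod = rx_prod;   /* RX slot to repost when TX completes */
            tx_buf->action = XDP_TX;     /* no unmap on completion: RX owns the page */
    }
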
116 dma_addr_t mapping, u32 len, in __bnxt_xmit_xdp_redirect() argument
121 tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL); in __bnxt_xmit_xdp_redirect()
122 tx_buf->action = XDP_REDIRECT; in __bnxt_xmit_xdp_redirect()
123 tx_buf->xdpf = xdpf; in __bnxt_xmit_xdp_redirect()
124 dma_unmap_addr_set(tx_buf, mapping, mapping); in __bnxt_xmit_xdp_redirect()
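
__bnxt_xmit_xdp_redirect() is the ndo_xdp_xmit counterpart: the caller has just created a fresh dma_map_single() mapping, so both the xdp_frame and the mapping are stored on the software TX descriptor for the completion path to unmap and free. A reconstruction on the same assumptions as above:

    static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
                                         struct bnxt_tx_ring_info *txr,
                                         dma_addr_t mapping, u32 len,
                                         struct xdp_frame *xdpf)
    {
            struct bnxt_sw_tx_bd *tx_buf;

            tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
            tx_buf->action = XDP_REDIRECT;
            tx_buf->xdpf = xdpf;
            dma_unmap_addr_set(tx_buf, mapping, mapping);
    }
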
130 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; in bnxt_tx_int_xdp()
131 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in bnxt_tx_int_xdp()
133 int nr_pkts = bnapi->tx_pkts; in bnxt_tx_int_xdp()
135 u16 tx_cons = txr->tx_cons; in bnxt_tx_int_xdp()
143 tx_buf = &txr->tx_buf_ring[tx_cons]; in bnxt_tx_int_xdp()
145 if (tx_buf->action == XDP_REDIRECT) { in bnxt_tx_int_xdp()
146 struct pci_dev *pdev = bp->pdev; in bnxt_tx_int_xdp()
148 dma_unmap_single(&pdev->dev, in bnxt_tx_int_xdp()
149 dma_unmap_addr(tx_buf, mapping), in bnxt_tx_int_xdp()
152 xdp_return_frame(tx_buf->xdpf); in bnxt_tx_int_xdp()
153 tx_buf->action = 0; in bnxt_tx_int_xdp()
154 tx_buf->xdpf = NULL; in bnxt_tx_int_xdp()
155 } else if (tx_buf->action == XDP_TX) { in bnxt_tx_int_xdp()
156 tx_buf->action = 0; in bnxt_tx_int_xdp()
160 frags = tx_buf->nr_frags; in bnxt_tx_int_xdp()
163 tx_buf = &txr->tx_buf_ring[tx_cons]; in bnxt_tx_int_xdp()
164 page_pool_recycle_direct(rxr->page_pool, tx_buf->page); in bnxt_tx_int_xdp()
173 bnapi->tx_pkts = 0; in bnxt_tx_int_xdp()
174 WRITE_ONCE(txr->tx_cons, tx_cons); in bnxt_tx_int_xdp()
176 tx_buf = &txr->tx_buf_ring[last_tx_cons]; in bnxt_tx_int_xdp()
177 bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod); in bnxt_tx_int_xdp()
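
bnxt_tx_int_xdp() undoes the per-action setup when TX completions arrive: XDP_REDIRECT buffers are dma_unmap_single()'d and their xdp_frame returned, XDP_TX buffers keep the RX mapping and only recycle their fragment pages to the page_pool, and the saved rx_prod is finally written to the RX doorbell. A condensed sketch of the per-buffer branch; the surrounding consumer-index loop and declarations are elided, and the dma_unmap_len() bookkeeping is assumed from bnxt.h.

            if (tx_buf->action == XDP_REDIRECT) {
                    struct pci_dev *pdev = bp->pdev;

                    /* Frame was mapped in bnxt_xdp_xmit(); unmap and free it. */
                    dma_unmap_single(&pdev->dev,
                                     dma_unmap_addr(tx_buf, mapping),
                                     dma_unmap_len(tx_buf, len),
                                     DMA_TO_DEVICE);
                    xdp_return_frame(tx_buf->xdpf);
                    tx_buf->action = 0;
                    tx_buf->xdpf = NULL;
            } else if (tx_buf->action == XDP_TX) {
                    int frags = tx_buf->nr_frags;
                    int j;

                    tx_buf->action = 0;
                    /* Fragment pages came from the RX page_pool; hand them back. */
                    for (j = 0; j < frags; j++) {
                            tx_cons = NEXT_TX(tx_cons);
                            tx_buf = &txr->tx_buf_ring[tx_cons];
                            page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
                    }
            }
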
184 struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); in bnxt_xdp_attached()
196 dma_addr_t mapping; in bnxt_xdp_buff_init() local
199 pdev = bp->pdev; in bnxt_xdp_buff_init()
200 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_xdp_buff_init()
201 offset = bp->rx_offset; in bnxt_xdp_buff_init()
203 mapping = rx_buf->mapping - bp->rx_dma_offset; in bnxt_xdp_buff_init()
204 dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir); in bnxt_xdp_buff_init()
206 xdp_init_buff(xdp, buflen, &rxr->xdp_rxq); in bnxt_xdp_buff_init()
207 xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false); in bnxt_xdp_buff_init()
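
bnxt_xdp_buff_init() bridges the RX ring and the XDP core: it syncs the received bytes to the CPU using the stored rx_buf->mapping (minus the driver's rx_dma_offset) and then initializes the xdp_buff with the ring's xdp_rxq, the headroom offset and the packet length. A reconstruction; the exact signature and the frame-size value are assumptions based on the callers in bnxt.c.

    static void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                                   u16 cons, u8 *data_ptr, unsigned int len,
                                   struct xdp_buff *xdp)
    {
            struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[cons];
            u32 buflen = BNXT_RX_PAGE_SIZE;          /* assumed frame size */
            struct pci_dev *pdev = bp->pdev;
            u32 offset = bp->rx_offset;
            dma_addr_t mapping;

            /* Make the DMA'd packet bytes visible to the CPU before running XDP. */
            mapping = rx_buf->mapping - bp->rx_dma_offset;
            dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);

            /* Describe the buffer to the XDP core: frame size, rxq, headroom, data. */
            xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
            xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
    }
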
219 for (i = 0; i < shinfo->nr_frags; i++) { in bnxt_xdp_buff_frags_free()
220 struct page *page = skb_frag_page(&shinfo->frags[i]); in bnxt_xdp_buff_frags_free()
222 page_pool_recycle_direct(rxr->page_pool, page); in bnxt_xdp_buff_frags_free()
224 shinfo->nr_frags = 0; in bnxt_xdp_buff_frags_free()
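
bnxt_xdp_buff_frags_free() is the cleanup used when a multi-buffer packet is dropped or its transmit fails: every fragment page recorded in the xdp_buff's shared info goes straight back to the RX page_pool and the fragment count is reset. A reconstruction; the has-frags guard is an assumption.

    static void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
                                         struct xdp_buff *xdp)
    {
            struct skb_shared_info *shinfo;
            int i;

            if (!xdp || !xdp_buff_has_frags(xdp))    /* assumed guard */
                    return;

            shinfo = xdp_get_shared_info_from_buff(xdp);
            for (i = 0; i < shinfo->nr_frags; i++) {
                    struct page *page = skb_frag_page(&shinfo->frags[i]);

                    page_pool_recycle_direct(rxr->page_pool, page);
            }
            shinfo->nr_frags = 0;
    }
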
228 * true - packet consumed by XDP and new buffer is allocated.
229 * false - packet should be passed to the stack.
235 struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); in bnxt_rx_xdp()
239 dma_addr_t mapping; in bnxt_rx_xdp() local
249 pdev = bp->pdev; in bnxt_rx_xdp()
250 offset = bp->rx_offset; in bnxt_rx_xdp()
252 txr = rxr->bnapi->tx_ring; in bnxt_rx_xdp()
254 orig_data = xdp.data; in bnxt_rx_xdp()
262 if (tx_avail != bp->tx_ring_size) in bnxt_rx_xdp()
265 *len = xdp.data_end - xdp.data; in bnxt_rx_xdp()
266 if (orig_data != xdp.data) { in bnxt_rx_xdp()
267 offset = xdp.data - xdp.data_hard_start; in bnxt_rx_xdp()
276 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_rx_xdp()
277 mapping = rx_buf->mapping - bp->rx_dma_offset; in bnxt_rx_xdp()
283 tx_needed += sinfo->nr_frags; in bnxt_rx_xdp()
288 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
294 dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, in bnxt_rx_xdp()
295 bp->rx_dir); in bnxt_rx_xdp()
298 __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, in bnxt_rx_xdp()
299 NEXT_RX(rxr->rx_prod), &xdp); in bnxt_rx_xdp()
307 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_rx_xdp()
308 mapping = rx_buf->mapping - bp->rx_dma_offset; in bnxt_rx_xdp()
309 dma_unmap_page_attrs(&pdev->dev, mapping, in bnxt_rx_xdp()
310 BNXT_RX_PAGE_SIZE, bp->rx_dir, in bnxt_rx_xdp()
314 if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { in bnxt_rx_xdp()
315 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
321 if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) { in bnxt_rx_xdp()
322 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
323 page_pool_recycle_direct(rxr->page_pool, page); in bnxt_rx_xdp()
330 bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
333 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
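
The bnxt_rx_xdp() matches show how each verdict treats the DMA mapping of the RX page: XDP_TX syncs the buffer back to the device and reuses the existing mapping for transmit, XDP_REDIRECT unmaps the page entirely (ownership moves to the XDP core) and refills the RX slot, and drops/aborts leave the buffer on the RX ring. A condensed sketch of that switch; the surrounding declarations, multi-buffer length handling, buffer-reuse and frag-free calls, and the unmap attributes are assumptions based on bnxt.c and are elided or simplified here.

            act = bpf_prog_run_xdp(xdp_prog, &xdp);
            switch (act) {
            case XDP_PASS:
                    return false;                   /* hand the packet to the stack */

            case XDP_TX:
                    rx_buf = &rxr->rx_buf_ring[cons];
                    mapping = rx_buf->mapping - bp->rx_dma_offset;

                    /* Give the (possibly rewritten) bytes back to the device and
                     * transmit using the RX page's existing mapping.
                     */
                    dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
                                               bp->rx_dir);
                    __bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
                                    NEXT_RX(rxr->rx_prod), &xdp);
                    return true;

            case XDP_REDIRECT:
                    /* The page leaves the driver: drop our mapping, then refill
                     * the RX slot before handing the buffer to the XDP core.
                     */
                    rx_buf = &rxr->rx_buf_ring[cons];
                    mapping = rx_buf->mapping - bp->rx_dma_offset;
                    dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                         bp->rx_dir, DMA_ATTR_WEAK_ORDERING);

                    if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
                            trace_xdp_exception(bp->dev, xdp_prog, act);
                            page_pool_recycle_direct(rxr->page_pool, page);
                            return true;
                    }
                    if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
                            trace_xdp_exception(bp->dev, xdp_prog, act);
                            page_pool_recycle_direct(rxr->page_pool, page);
                    }
                    return true;

            default:
                    bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
                    fallthrough;
            case XDP_ABORTED:
                    trace_xdp_exception(bp->dev, xdp_prog, act);
                    fallthrough;
            case XDP_DROP:
                    return true;                    /* buffer stays on the RX ring */
            }
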
347 struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog); in bnxt_xdp_xmit()
348 struct pci_dev *pdev = bp->pdev; in bnxt_xdp_xmit()
350 dma_addr_t mapping; in bnxt_xdp_xmit() local
355 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || in bnxt_xdp_xmit()
356 !bp->tx_nr_rings_xdp || in bnxt_xdp_xmit()
358 return -EINVAL; in bnxt_xdp_xmit()
360 ring = smp_processor_id() % bp->tx_nr_rings_xdp; in bnxt_xdp_xmit()
361 txr = &bp->tx_ring[ring]; in bnxt_xdp_xmit()
363 if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING) in bnxt_xdp_xmit()
364 return -EINVAL; in bnxt_xdp_xmit()
367 spin_lock(&txr->xdp_tx_lock); in bnxt_xdp_xmit()
375 mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, in bnxt_xdp_xmit()
378 if (dma_mapping_error(&pdev->dev, mapping)) in bnxt_xdp_xmit()
381 __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp); in bnxt_xdp_xmit()
386 /* Sync BD data before updating doorbell */ in bnxt_xdp_xmit()
388 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); in bnxt_xdp_xmit()
392 spin_unlock(&txr->xdp_tx_lock); in bnxt_xdp_xmit()
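
bnxt_xdp_xmit() is the driver's .ndo_xdp_xmit hook: one XDP TX ring is picked per CPU, each redirected frame gets its own dma_map_single() mapping (frames that fail to map are simply not counted), and the doorbell is written once at the end under the ring's xdp_tx_lock. A condensed sketch of the loop, using the standard ndo_xdp_xmit parameters (num_frames, frames, flags); the ring-space check and the nxmit accounting are assumptions.

            spin_lock(&txr->xdp_tx_lock);

            for (i = 0; i < num_frames; i++) {
                    struct xdp_frame *xdp = frames[i];

                    if (!bnxt_tx_avail(bp, txr))    /* assumed ring-space check */
                            break;

                    mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
                                             DMA_TO_DEVICE);
                    if (dma_mapping_error(&pdev->dev, mapping))
                            break;

                    __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
                    nxmit++;
            }

            if (flags & XDP_XMIT_FLUSH) {
                    /* Sync BD data before updating doorbell */
                    wmb();
                    bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
            }

            spin_unlock(&txr->xdp_tx_lock);
            return nxmit;
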
400 struct net_device *dev = bp->dev; in bnxt_xdp_set()
404 if (prog && !prog->aux->xdp_has_frags && in bnxt_xdp_set()
405 bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { in bnxt_xdp_set()
407 bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); in bnxt_xdp_set()
408 return -EOPNOTSUPP; in bnxt_xdp_set()
410 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) { in bnxt_xdp_set()
412 return -EOPNOTSUPP; in bnxt_xdp_set()
415 tx_xdp = bp->rx_nr_rings; in bnxt_xdp_set()
420 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, in bnxt_xdp_set()
429 old = xchg(&bp->xdp_prog, prog); in bnxt_xdp_set()
443 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; in bnxt_xdp_set()
444 bp->dev->hw_features |= NETIF_F_LRO; in bnxt_xdp_set()
447 bp->tx_nr_rings_xdp = tx_xdp; in bnxt_xdp_set()
448 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp; in bnxt_xdp_set()
449 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); in bnxt_xdp_set()
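
bnxt_xdp_set() attaches the program and resizes the rings: multi-buffer-unaware programs are refused above BNXT_MAX_PAGE_MODE_MTU, XDP requires the combined (shared) ring layout, and one dedicated XDP TX ring is added per RX ring on top of the per-TC TX rings, with the completion ring count following the larger of the two. A sketch of that validation and ring accounting; the warning text is paraphrased, tc stands in for the device's TC count (e.g. netdev_get_num_tc()), and the bnxt_check_rings() and xchg() of the old program seen above are elided.

            if (prog && !prog->aux->xdp_has_frags &&
                bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
                    netdev_warn(dev, "MTU %d too large for XDP without frag support (max %d)\n",
                                bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
                    return -EOPNOTSUPP;
            }
            if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
                    netdev_warn(dev, "combined rx/tx channels are required for XDP\n");
                    return -EOPNOTSUPP;
            }

            /* One XDP TX ring per RX ring, in addition to the per-TC TX rings. */
            tx_xdp = bp->rx_nr_rings;
            bp->tx_nr_rings_xdp = tx_xdp;
            bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
            bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
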
464 switch (xdp->command) { in bnxt_xdp()
466 rc = bnxt_xdp_set(bp, xdp->prog); in bnxt_xdp()
469 rc = -EINVAL; in bnxt_xdp()
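
bnxt_xdp() is the driver's .ndo_bpf entry point; per the matches, only XDP_SETUP_PROG is handled and every other command returns -EINVAL. A reconstruction:

    static int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
    {
            struct bnxt *bp = netdev_priv(dev);
            int rc;

            switch (xdp->command) {
            case XDP_SETUP_PROG:
                    rc = bnxt_xdp_set(bp, xdp->prog);
                    break;
            default:
                    rc = -EINVAL;
                    break;
            }
            return rc;
    }
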
486 if (bp->dev->features & NETIF_F_RXCSUM) { in bnxt_xdp_build_skb()
487 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnxt_xdp_build_skb()
488 skb->csum_level = RX_CMP_ENCAP(rxcmp1); in bnxt_xdp_build_skb()
492 sinfo->xdp_frags_size, in bnxt_xdp_build_skb()
493 BNXT_RX_PAGE_SIZE * sinfo->nr_frags, in bnxt_xdp_build_skb()
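
bnxt_xdp_build_skb() finishes a multi-buffer packet that XDP passed to the stack: the hardware checksum status from the RX completion is copied onto the skb, and xdp_update_skb_shared_info() transplants the xdp_buff's fragments, their total length and truesize (one BNXT_RX_PAGE_SIZE per fragment). A condensed sketch of those two steps; the surrounding skb allocation is elided and the variable names (skb, rxcmp1, sinfo, xdp) follow the matches above.

            if (bp->dev->features & NETIF_F_RXCSUM) {
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
                    skb->csum_level = RX_CMP_ENCAP(rxcmp1);
            }

            /* Attach the xdp_buff's frags to the skb: count, length, truesize. */
            xdp_update_skb_shared_info(skb, sinfo->nr_frags,
                                       sinfo->xdp_frags_size,
                                       BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
                                       xdp_buff_is_frag_pfmemalloc(xdp));
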