Lines Matching +full:data +full:-mapping in drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c

1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2016-2017 Broadcom Limited
27 dma_addr_t mapping, u32 len, in bnxt_xmit_bd() argument
40 num_frags = sinfo->nr_frags; in bnxt_xmit_bd()
44 prod = txr->tx_prod; in bnxt_xmit_bd()
45 tx_buf = &txr->tx_buf_ring[prod]; in bnxt_xmit_bd()
46 tx_buf->nr_frags = num_frags; in bnxt_xmit_bd()
48 tx_buf->page = virt_to_head_page(xdp->data); in bnxt_xmit_bd()
50 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; in bnxt_xmit_bd()
54 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); in bnxt_xmit_bd()
55 txbd->tx_bd_opaque = prod; in bnxt_xmit_bd()
56 txbd->tx_bd_haddr = cpu_to_le64(mapping); in bnxt_xmit_bd()
60 skb_frag_t *frag = &sinfo->frags[i]; in bnxt_xmit_bd()
62 struct pci_dev *pdev = bp->pdev; in bnxt_xmit_bd()
67 txr->tx_prod = prod; in bnxt_xmit_bd()
70 frag_tx_buf = &txr->tx_buf_ring[prod]; in bnxt_xmit_bd()
71 frag_tx_buf->page = skb_frag_page(frag); in bnxt_xmit_bd()
73 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; in bnxt_xmit_bd()
76 frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0, in bnxt_xmit_bd()
79 if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping))) in bnxt_xmit_bd()
82 dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping); in bnxt_xmit_bd()
85 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); in bnxt_xmit_bd()
86 txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); in bnxt_xmit_bd()
92 txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags | in bnxt_xmit_bd()
97 txr->tx_prod = prod; in bnxt_xmit_bd()
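
The fragments above are from bnxt_xmit_bd(), which fills one TX buffer descriptor (BD) for the head buffer and then one per fragment. A condensed sketch of the head-BD fill, with the flag computation reconstructed around the matched lines (the macro and array names are the driver's own, from bnxt.h/bnxt.c):

    /* Pack length, BD count, and a length hint into the first BD, then
     * record the ring index (echoed back on completion) and DMA address.
     */
    flags = (len << TX_BD_LEN_SHIFT) |
            ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
            bnxt_lhint_arr[len >> 9];
    txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
    txbd->tx_bd_opaque = prod;
    txbd->tx_bd_haddr = cpu_to_le64(mapping);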
103 dma_addr_t mapping, u32 len, u16 rx_prod, in __bnxt_xmit_xdp() argument
108 tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp); in __bnxt_xmit_xdp()
109 tx_buf->rx_prod = rx_prod; in __bnxt_xmit_xdp()
110 tx_buf->action = XDP_TX; in __bnxt_xmit_xdp()
116 dma_addr_t mapping, u32 len, in __bnxt_xmit_xdp_redirect() argument
121 tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL); in __bnxt_xmit_xdp_redirect()
122 tx_buf->action = XDP_REDIRECT; in __bnxt_xmit_xdp_redirect()
123 tx_buf->xdpf = xdpf; in __bnxt_xmit_xdp_redirect()
124 dma_unmap_addr_set(tx_buf, mapping, mapping); in __bnxt_xmit_xdp_redirect()
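
__bnxt_xmit_xdp() and __bnxt_xmit_xdp_redirect() both queue a frame through bnxt_xmit_bd(); they differ only in the bookkeeping the completion path will need. A side-by-side sketch of the two contracts (field names taken from the matched lines):

    /* XDP_TX: the frame lives in a page-pool RX page, so no unmap is
     * needed; remember the RX producer so completion can ring the RX
     * doorbell and let the hardware reuse the buffer.
     */
    tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
    tx_buf->rx_prod = rx_prod;
    tx_buf->action = XDP_TX;

    /* XDP_REDIRECT: the frame was mapped with dma_map_single(), so save
     * the mapping for dma_unmap_single() and keep the xdp_frame pointer
     * for xdp_return_frame() at completion time.
     */
    tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
    tx_buf->action = XDP_REDIRECT;
    tx_buf->xdpf = xdpf;
    dma_unmap_addr_set(tx_buf, mapping, mapping);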
130 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; in bnxt_tx_int_xdp()
131 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in bnxt_tx_int_xdp()
134 u16 tx_cons = txr->tx_cons; in bnxt_tx_int_xdp()
139 tx_buf = &txr->tx_buf_ring[tx_cons]; in bnxt_tx_int_xdp()
141 if (tx_buf->action == XDP_REDIRECT) { in bnxt_tx_int_xdp()
142 struct pci_dev *pdev = bp->pdev; in bnxt_tx_int_xdp()
144 dma_unmap_single(&pdev->dev, in bnxt_tx_int_xdp()
145 dma_unmap_addr(tx_buf, mapping), in bnxt_tx_int_xdp()
148 xdp_return_frame(tx_buf->xdpf); in bnxt_tx_int_xdp()
149 tx_buf->action = 0; in bnxt_tx_int_xdp()
150 tx_buf->xdpf = NULL; in bnxt_tx_int_xdp()
151 } else if (tx_buf->action == XDP_TX) { in bnxt_tx_int_xdp()
155 frags = tx_buf->nr_frags; in bnxt_tx_int_xdp()
158 tx_buf = &txr->tx_buf_ring[tx_cons]; in bnxt_tx_int_xdp()
159 page_pool_recycle_direct(rxr->page_pool, tx_buf->page); in bnxt_tx_int_xdp()
164 txr->tx_cons = tx_cons; in bnxt_tx_int_xdp()
166 tx_buf = &txr->tx_buf_ring[last_tx_cons]; in bnxt_tx_int_xdp()
167 bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod); in bnxt_tx_int_xdp()
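
bnxt_tx_int_xdp() consumes the completed BDs and undoes that bookkeeping. A sketch of the loop reconstructed around the matched lines (declarations and the dma_unmap_len() accessor are paraphrased from the surrounding driver code):

    for (i = 0; i < nr_pkts; i++) {
        tx_buf = &txr->tx_buf_ring[tx_cons];

        if (tx_buf->action == XDP_REDIRECT) {
            /* unmap the frame and hand it back to the XDP core */
            dma_unmap_single(&pdev->dev,
                             dma_unmap_addr(tx_buf, mapping),
                             dma_unmap_len(tx_buf, len),
                             DMA_TO_DEVICE);
            xdp_return_frame(tx_buf->xdpf);
            tx_buf->action = 0;
            tx_buf->xdpf = NULL;
        } else if (tx_buf->action == XDP_TX) {
            /* the head buffer is reused via the RX doorbell below;
             * only the fragment pages go back to the page pool
             */
            rx_doorbell_needed = true;
            last_tx_cons = tx_cons;
            frags = tx_buf->nr_frags;
            for (j = 0; j < frags; j++) {
                tx_cons = NEXT_TX(tx_cons);
                tx_buf = &txr->tx_buf_ring[tx_cons];
                page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
            }
        }
        tx_cons = NEXT_TX(tx_cons);
    }
    txr->tx_cons = tx_cons;
    if (rx_doorbell_needed) {
        tx_buf = &txr->tx_buf_ring[last_tx_cons];
        bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
    }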
174 struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); in bnxt_xdp_attached()
186 dma_addr_t mapping; in bnxt_xdp_buff_init() local
189 pdev = bp->pdev; in bnxt_xdp_buff_init()
190 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_xdp_buff_init()
191 offset = bp->rx_offset; in bnxt_xdp_buff_init()
193 mapping = rx_buf->mapping - bp->rx_dma_offset; in bnxt_xdp_buff_init()
194 dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); in bnxt_xdp_buff_init()
196 if (bp->xdp_has_frags) in bnxt_xdp_buff_init()
199 xdp_init_buff(xdp, buflen, &rxr->xdp_rxq); in bnxt_xdp_buff_init()
200 xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false); in bnxt_xdp_buff_init()
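
bnxt_xdp_buff_init() makes the RX buffer CPU-visible and then describes it to the XDP core. The two xdp_*_buff() calls carry the geometry: total buffer length, the headroom the driver reserved, and the received packet length. A sketch (the buflen selection for multi-buffer programs is reconstructed):

    mapping = rx_buf->mapping - bp->rx_dma_offset;
    dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

    xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
    xdp_prepare_buff(xdp,
                     *data_ptr - offset,   /* hard_start of the buffer */
                     offset,               /* headroom before the data */
                     *len,                 /* received packet length   */
                     false);               /* no metadata present      */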
212 for (i = 0; i < shinfo->nr_frags; i++) { in bnxt_xdp_buff_frags_free()
213 struct page *page = skb_frag_page(&shinfo->frags[i]); in bnxt_xdp_buff_frags_free()
215 page_pool_recycle_direct(rxr->page_pool, page); in bnxt_xdp_buff_frags_free()
217 shinfo->nr_frags = 0; in bnxt_xdp_buff_frags_free()
221 * true - packet consumed by XDP and new buffer is allocated.
222 * false - packet should be passed to the stack.
227 struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); in bnxt_rx_xdp()
231 dma_addr_t mapping; in bnxt_rx_xdp() local
241 pdev = bp->pdev; in bnxt_rx_xdp()
242 offset = bp->rx_offset; in bnxt_rx_xdp()
244 txr = rxr->bnapi->tx_ring; in bnxt_rx_xdp()
246 orig_data = xdp.data; in bnxt_rx_xdp()
254 if (tx_avail != bp->tx_ring_size) in bnxt_rx_xdp()
257 *len = xdp.data_end - xdp.data; in bnxt_rx_xdp()
258 if (orig_data != xdp.data) in bnxt_rx_xdp()
259 offset = xdp.data - xdp.data_hard_start; in bnxt_rx_xdp()
266 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_rx_xdp()
267 mapping = rx_buf->mapping - bp->rx_dma_offset; in bnxt_rx_xdp()
273 tx_needed += sinfo->nr_frags; in bnxt_rx_xdp()
278 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
284 dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, in bnxt_rx_xdp()
285 bp->rx_dir); in bnxt_rx_xdp()
288 __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, in bnxt_rx_xdp()
289 NEXT_RX(rxr->rx_prod), &xdp); in bnxt_rx_xdp()
297 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_rx_xdp()
298 mapping = rx_buf->mapping - bp->rx_dma_offset; in bnxt_rx_xdp()
299 dma_unmap_page_attrs(&pdev->dev, mapping, in bnxt_rx_xdp()
300 PAGE_SIZE, bp->rx_dir, in bnxt_rx_xdp()
304 if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { in bnxt_rx_xdp()
305 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
311 if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) { in bnxt_rx_xdp()
312 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
313 page_pool_recycle_direct(rxr->page_pool, page); in bnxt_rx_xdp()
320 bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
323 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
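
The bnxt_rx_xdp() fragments above all belong to the usual verdict switch on bpf_prog_run_xdp(). A condensed reconstruction of its shape (error paths and event-flag updates are abbreviated):

    act = bpf_prog_run_xdp(xdp_prog, &xdp);
    switch (act) {
    case XDP_PASS:
        return false;       /* let the stack build an skb */
    case XDP_TX:
        /* write data back to the device and send from the RX page */
        dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
                                   bp->rx_dir);
        __bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
                        NEXT_RX(rxr->rx_prod), &xdp);
        return true;
    case XDP_REDIRECT:
        /* the page leaves the driver: unmap it, refill the RX slot,
         * and recycle it only if the redirect fails
         */
        if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
            trace_xdp_exception(bp->dev, xdp_prog, act);
            page_pool_recycle_direct(rxr->page_pool, page);
        }
        return true;
    default:
        bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
        fallthrough;
    case XDP_ABORTED:
        trace_xdp_exception(bp->dev, xdp_prog, act);
        fallthrough;
    case XDP_DROP:
        return true;        /* buffer is reused for the next packet */
    }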
337 struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog); in bnxt_xdp_xmit()
338 struct pci_dev *pdev = bp->pdev; in bnxt_xdp_xmit()
340 dma_addr_t mapping; in bnxt_xdp_xmit() local
345 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || in bnxt_xdp_xmit()
346 !bp->tx_nr_rings_xdp || in bnxt_xdp_xmit()
348 return -EINVAL; in bnxt_xdp_xmit()
350 ring = smp_processor_id() % bp->tx_nr_rings_xdp; in bnxt_xdp_xmit()
351 txr = &bp->tx_ring[ring]; in bnxt_xdp_xmit()
353 if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING) in bnxt_xdp_xmit()
354 return -EINVAL; in bnxt_xdp_xmit()
357 spin_lock(&txr->xdp_tx_lock); in bnxt_xdp_xmit()
365 mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, in bnxt_xdp_xmit()
368 if (dma_mapping_error(&pdev->dev, mapping)) in bnxt_xdp_xmit()
371 __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp); in bnxt_xdp_xmit()
376 /* Sync BD data before updating doorbell */ in bnxt_xdp_xmit()
378 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); in bnxt_xdp_xmit()
382 spin_unlock(&txr->xdp_tx_lock); in bnxt_xdp_xmit()
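
bnxt_xdp_xmit() is the driver's ndo_xdp_xmit hook for frames redirected from other devices: it hashes the current CPU onto one of the dedicated XDP TX rings, maps and queues each frame, and rings the doorbell once at the end. A sketch of the transmit loop (the per-frame availability check is reconstructed):

    ring = smp_processor_id() % bp->tx_nr_rings_xdp;
    txr = &bp->tx_ring[ring];

    spin_lock(&txr->xdp_tx_lock);
    for (i = 0; i < num_frames; i++) {
        struct xdp_frame *xdp = frames[i];

        if (!bnxt_tx_avail(bp, txr))
            break;

        mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, mapping))
            break;

        __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
        nxmit++;
    }

    if (flags & XDP_XMIT_FLUSH) {
        /* Sync BD data before updating doorbell */
        wmb();
        bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
    }
    spin_unlock(&txr->xdp_tx_lock);
    return nxmit;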
390 struct net_device *dev = bp->dev; in bnxt_xdp_set()
394 if (prog && !prog->aux->xdp_has_frags && in bnxt_xdp_set()
395 bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { in bnxt_xdp_set()
397 bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); in bnxt_xdp_set()
398 return -EOPNOTSUPP; in bnxt_xdp_set()
400 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) { in bnxt_xdp_set()
402 return -EOPNOTSUPP; in bnxt_xdp_set()
405 tx_xdp = bp->rx_nr_rings; in bnxt_xdp_set()
406 bp->xdp_has_frags = prog->aux->xdp_has_frags; in bnxt_xdp_set()
412 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, in bnxt_xdp_set()
421 old = xchg(&bp->xdp_prog, prog); in bnxt_xdp_set()
433 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; in bnxt_xdp_set()
434 bp->dev->hw_features |= NETIF_F_LRO; in bnxt_xdp_set()
437 bp->tx_nr_rings_xdp = tx_xdp; in bnxt_xdp_set()
438 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp; in bnxt_xdp_set()
439 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); in bnxt_xdp_set()
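
Attaching a program in bnxt_xdp_set() reserves one dedicated XDP TX ring per RX ring and recomputes the ring totals from the matched lines. Worked through with a hypothetical configuration of 4 RX rings, 4 TX rings per traffic class, and 1 traffic class (tc):

    tx_xdp = bp->rx_nr_rings;                      /* 4 XDP TX rings */
    bp->tx_nr_rings_xdp = tx_xdp;
    bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;  /* 4*1 + 4 = 8 */
    bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);  /* 8 */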
454 switch (xdp->command) { in bnxt_xdp()
456 rc = bnxt_xdp_set(bp, xdp->prog); in bnxt_xdp()
459 rc = -EINVAL; in bnxt_xdp()
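
bnxt_xdp() is the ndo_bpf entry point; the matched lines show its dispatch, which reduces to:

    switch (xdp->command) {
    case XDP_SETUP_PROG:
        rc = bnxt_xdp_set(bp, xdp->prog);
        break;
    default:
        rc = -EINVAL;
        break;
    }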
476 if (bp->dev->features & NETIF_F_RXCSUM) { in bnxt_xdp_build_skb()
477 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnxt_xdp_build_skb()
478 skb->csum_level = RX_CMP_ENCAP(rxcmp1); in bnxt_xdp_build_skb()
482 sinfo->xdp_frags_size, in bnxt_xdp_build_skb()
483 PAGE_SIZE * sinfo->nr_frags, in bnxt_xdp_build_skb()
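
bnxt_xdp_build_skb() finishes a multi-buffer packet that XDP_PASS handed to the stack: it copies the hardware checksum verdict into the skb and attaches the xdp_buff fragments via the core helper. A sketch reconstructed around the matched lines (the RX_CMP_* accessors are the driver's completion-record macros):

    sinfo = xdp_get_shared_info_from_buff(xdp);

    if (RX_CMP_L4_CS_OK(rxcmp1) && (bp->dev->features & NETIF_F_RXCSUM)) {
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = RX_CMP_ENCAP(rxcmp1);
    }

    xdp_update_skb_shared_info(skb, num_frags,
                               sinfo->xdp_frags_size,       /* bytes in frags */
                               PAGE_SIZE * sinfo->nr_frags, /* truesize       */
                               xdp_buff_is_frag_pfmemalloc(xdp));
    return skb;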