Lines Matching full:dp (matches for the NFP Net data path struct, struct nfp_net_dp, referenced as dp or nn->dp throughout the driver)
66 static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag) in nfp_net_dma_map_rx() argument
68 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM, in nfp_net_dma_map_rx()
69 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, in nfp_net_dma_map_rx()
70 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); in nfp_net_dma_map_rx()
74 nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr) in nfp_net_dma_sync_dev_rx() argument
76 dma_sync_single_for_device(dp->dev, dma_addr, in nfp_net_dma_sync_dev_rx()
77 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, in nfp_net_dma_sync_dev_rx()
78 dp->rx_dma_dir); in nfp_net_dma_sync_dev_rx()
81 static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr) in nfp_net_dma_unmap_rx() argument
83 dma_unmap_single_attrs(dp->dev, dma_addr, in nfp_net_dma_unmap_rx()
84 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, in nfp_net_dma_unmap_rx()
85 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); in nfp_net_dma_unmap_rx()
88 static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr, in nfp_net_dma_sync_cpu_rx() argument
91 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM, in nfp_net_dma_sync_cpu_rx()
92 len, dp->rx_dma_dir); in nfp_net_dma_sync_cpu_rx()
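Because nfp_net_dma_map_rx() and nfp_net_dma_unmap_rx() pass DMA_ATTR_SKIP_CPU_SYNC, cache ownership is handed over explicitly: nfp_net_dma_sync_dev_rx() flushes the full data area before a buffer is given to the NIC, while nfp_net_dma_sync_cpu_rx() pulls back only the len bytes that actually arrived. Below is a minimal user-space model of the buffer geometry these four helpers share; the constants are illustrative stand-ins, not the driver's real values (in the driver, NFP_NET_RX_BUF_HEADROOM is NET_SKB_PAD + NET_IP_ALIGN and NFP_NET_RX_BUF_NON_DATA additionally covers the skb_shared_info tail):

#include <stdio.h>

#define NFP_NET_RX_BUF_HEADROOM 64      /* assumed stand-in */
#define NFP_NET_RX_BUF_NON_DATA 384     /* assumed stand-in */

int main(void)
{
        unsigned int fl_bufsz = 2048;   /* dp->fl_bufsz, assumed */

        /* The map/unmap helpers cover only the data region of the
         * frag: the headroom in front and the shared-info tail are
         * never visible to the device. */
        unsigned int mapped_len = fl_bufsz - NFP_NET_RX_BUF_NON_DATA;

        printf("device maps %u bytes at frag+%u\n",
               mapped_len, NFP_NET_RX_BUF_HEADROOM);
        return 0;
}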
437 struct nfp_net_dp *dp = &nn->dp; in nfp_net_irqs_assign() local
440 dp->num_r_vecs = nn->max_r_vecs; in nfp_net_irqs_assign()
444 if (dp->num_rx_rings > dp->num_r_vecs || in nfp_net_irqs_assign()
445 dp->num_tx_rings > dp->num_r_vecs) in nfp_net_irqs_assign()
446 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n", in nfp_net_irqs_assign()
447 dp->num_rx_rings, dp->num_tx_rings, in nfp_net_irqs_assign()
448 dp->num_r_vecs); in nfp_net_irqs_assign()
450 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings); in nfp_net_irqs_assign()
451 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings); in nfp_net_irqs_assign()
452 dp->num_stack_tx_rings = dp->num_tx_rings; in nfp_net_irqs_assign()
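nfp_net_irqs_assign() never lets ring counts exceed the interrupt vectors it was granted: it warns, then clamps both counts with min(). A small runnable model of that clamp, using assumed example counts:

#include <stdio.h>

int main(void)
{
        /* Assumed example counts; real values come from the device. */
        unsigned int num_r_vecs = 8, num_rx_rings = 16, num_tx_rings = 16;

        if (num_rx_rings > num_r_vecs || num_tx_rings > num_r_vecs)
                fprintf(stderr, "More rings (%u,%u) than vectors (%u).\n",
                        num_rx_rings, num_tx_rings, num_r_vecs);

        /* The min() clamp from lines 450-451. */
        if (num_rx_rings > num_r_vecs)
                num_rx_rings = num_r_vecs;
        if (num_tx_rings > num_r_vecs)
                num_tx_rings = num_r_vecs;

        printf("using rx=%u tx=%u\n", num_rx_rings, num_tx_rings);
        return 0;
}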
524 netif_carrier_on(nn->dp.netdev); in nfp_net_read_link_status()
525 netdev_info(nn->dp.netdev, "NIC Link is Up\n"); in nfp_net_read_link_status()
527 netif_carrier_off(nn->dp.netdev); in nfp_net_read_link_status()
528 netdev_info(nn->dp.netdev, "NIC Link is Down\n"); in nfp_net_read_link_status()
771 * @dp: NFP Net data path struct
780 static void nfp_net_tx_csum(struct nfp_net_dp *dp, in nfp_net_tx_csum() argument
789 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) in nfp_net_tx_csum()
808 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version); in nfp_net_tx_csum()
820 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr); in nfp_net_tx_csum()
833 nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, in nfp_net_tls_tx() argument
842 if (likely(!dp->ktls_tx)) in nfp_net_tls_tx()
872 nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n"); in nfp_net_tls_tx()
993 struct nfp_net_dp *dp; in nfp_net_tx() local
999 dp = &nn->dp; in nfp_net_tx()
1001 tx_ring = &dp->tx_rings[qidx]; in nfp_net_tx()
1007 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n", in nfp_net_tx()
1009 nd_q = netdev_get_tx_queue(dp->netdev, qidx); in nfp_net_tx()
1018 skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags); in nfp_net_tx()
1029 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), in nfp_net_tx()
1031 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_net_tx()
1057 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb); in nfp_net_tx()
1058 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { in nfp_net_tx()
1074 dma_addr = skb_frag_dma_map(dp->dev, frag, 0, in nfp_net_tx()
1076 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_net_tx()
1099 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); in nfp_net_tx()
1114 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, in nfp_net_tx()
1123 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, in nfp_net_tx()
1129 nn_dp_warn(dp, "Failed to map DMA TX buffer\n"); in nfp_net_tx()
1148 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_tx_complete() local
1184 dma_unmap_single(dp->dev, tx_buf->dma_addr, in nfp_net_tx_complete()
1192 dma_unmap_page(dp->dev, tx_buf->dma_addr, in nfp_net_tx_complete()
1212 if (!dp->netdev) in nfp_net_tx_complete()
1215 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); in nfp_net_tx_complete()
1273 * @dp: NFP Net data path struct
1279 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) in nfp_net_tx_ring_reset() argument
1297 dma_unmap_single(dp->dev, tx_buf->dma_addr, in nfp_net_tx_ring_reset()
1302 dma_unmap_page(dp->dev, tx_buf->dma_addr, in nfp_net_tx_ring_reset()
1324 if (tx_ring->is_xdp || !dp->netdev) in nfp_net_tx_ring_reset()
1327 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); in nfp_net_tx_ring_reset()
1341 nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp) in nfp_net_calc_fl_bufsz() argument
1346 fl_bufsz += dp->rx_dma_off; in nfp_net_calc_fl_bufsz()
1347 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_net_calc_fl_bufsz()
1350 fl_bufsz += dp->rx_offset; in nfp_net_calc_fl_bufsz()
1351 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu; in nfp_net_calc_fl_bufsz()
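nfp_net_calc_fl_bufsz() sizes a freelist buffer as headroom, plus any DMA offset and prepend space, plus a worst-case frame (L2 header, two VLAN tags, the MTU). A runnable model of the calculation follows; the headroom and NFP_NET_MAX_PREPEND values are assumed stand-ins, and the real function additionally pads the result for skb_shared_info:

#include <stdio.h>

#define NFP_NET_RX_BUF_HEADROOM         64      /* assumed stand-in */
#define NFP_NET_MAX_PREPEND             64      /* assumed stand-in */
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC   0
#define ETH_HLEN                        14
#define VLAN_HLEN                       4

static unsigned int calc_fl_bufsz(unsigned int rx_dma_off,
                                  unsigned int rx_offset, unsigned int mtu)
{
        unsigned int fl_bufsz = NFP_NET_RX_BUF_HEADROOM;

        fl_bufsz += rx_dma_off;
        if (rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                fl_bufsz += NFP_NET_MAX_PREPEND; /* worst-case prepend */
        else
                fl_bufsz += rx_offset;
        fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
        return fl_bufsz;
}

int main(void)
{
        printf("fl_bufsz = %u\n", calc_fl_bufsz(0, 32, 1500));
        return 0;
}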
1370 * @dp: NFP Net data path struct
1377 static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) in nfp_net_rx_alloc_one() argument
1381 if (!dp->xdp_prog) { in nfp_net_rx_alloc_one()
1382 frag = netdev_alloc_frag(dp->fl_bufsz); in nfp_net_rx_alloc_one()
1390 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); in nfp_net_rx_alloc_one()
1394 *dma_addr = nfp_net_dma_map_rx(dp, frag); in nfp_net_rx_alloc_one()
1395 if (dma_mapping_error(dp->dev, *dma_addr)) { in nfp_net_rx_alloc_one()
1396 nfp_net_free_frag(frag, dp->xdp_prog); in nfp_net_rx_alloc_one()
1397 nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); in nfp_net_rx_alloc_one()
1404 static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) in nfp_net_napi_alloc_one() argument
1408 if (!dp->xdp_prog) { in nfp_net_napi_alloc_one()
1409 frag = napi_alloc_frag(dp->fl_bufsz); in nfp_net_napi_alloc_one()
1421 *dma_addr = nfp_net_dma_map_rx(dp, frag); in nfp_net_napi_alloc_one()
1422 if (dma_mapping_error(dp->dev, *dma_addr)) { in nfp_net_napi_alloc_one()
1423 nfp_net_free_frag(frag, dp->xdp_prog); in nfp_net_napi_alloc_one()
1424 nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); in nfp_net_napi_alloc_one()
1433 * @dp: NFP Net data path struct
1438 static void nfp_net_rx_give_one(const struct nfp_net_dp *dp, in nfp_net_rx_give_one() argument
1446 nfp_net_dma_sync_dev_rx(dp, dma_addr); in nfp_net_rx_give_one()
1456 dma_addr + dp->rx_dma_off); in nfp_net_rx_give_one()
1499 * @dp: NFP Net data path struct
1507 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, in nfp_net_rx_ring_bufs_free() argument
1520 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr); in nfp_net_rx_ring_bufs_free()
1521 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); in nfp_net_rx_ring_bufs_free()
1529 * @dp: NFP Net data path struct
1533 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, in nfp_net_rx_ring_bufs_alloc() argument
1542 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr); in nfp_net_rx_ring_bufs_alloc()
1544 nfp_net_rx_ring_bufs_free(dp, rx_ring); in nfp_net_rx_ring_bufs_alloc()
1554 * @dp: NFP Net data path struct
1558 nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp, in nfp_net_rx_ring_fill_freelist() argument
1564 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, in nfp_net_rx_ring_fill_freelist()
1584 * @dp: NFP Net data path struct
1590 static void nfp_net_rx_csum(struct nfp_net_dp *dp, in nfp_net_rx_csum() argument
1597 if (!(dp->netdev->features & NETIF_F_RXCSUM)) in nfp_net_rx_csum()
1720 nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, in nfp_net_rx_drop() argument
1739 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); in nfp_net_rx_drop()
1745 nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring, in nfp_net_tx_xdp_buf() argument
1750 unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA; in nfp_net_tx_xdp_buf()
1766 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, in nfp_net_tx_xdp_buf()
1777 nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr); in nfp_net_tx_xdp_buf()
1785 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off, in nfp_net_tx_xdp_buf()
1818 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_rx() local
1828 xdp_prog = READ_ONCE(dp->xdp_prog); in nfp_net_rx()
1829 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; in nfp_net_rx()
1878 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; in nfp_net_rx()
1879 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_net_rx()
1882 pkt_off += dp->rx_offset; in nfp_net_rx()
1892 (dp->rx_offset && meta_len > dp->rx_offset))) { in nfp_net_rx()
1893 nn_dp_warn(dp, "oversized RX packet metadata %u\n", in nfp_net_rx()
1895 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); in nfp_net_rx()
1899 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, in nfp_net_rx()
1902 if (!dp->chained_metadata_format) { in nfp_net_rx()
1903 nfp_net_set_hash_desc(dp->netdev, &meta, in nfp_net_rx()
1906 if (unlikely(nfp_net_parse_meta(dp->netdev, &meta, in nfp_net_rx()
1910 nn_dp_warn(dp, "invalid RX packet metadata\n"); in nfp_net_rx()
1911 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, in nfp_net_rx()
1938 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring, in nfp_net_rx()
1943 trace_xdp_exception(dp->netdev, in nfp_net_rx()
1950 trace_xdp_exception(dp->netdev, xdp_prog, act); in nfp_net_rx()
1953 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, in nfp_net_rx()
1960 netdev = dp->netdev; in nfp_net_rx()
1962 struct nfp_net *nn = netdev_priv(dp->netdev); in nfp_net_rx()
1966 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, in nfp_net_rx()
1972 nn = netdev_priv(dp->netdev); in nfp_net_rx()
1976 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, in nfp_net_rx()
1987 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); in nfp_net_rx()
1990 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); in nfp_net_rx()
1992 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); in nfp_net_rx()
1996 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); in nfp_net_rx()
1998 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); in nfp_net_rx()
2009 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb); in nfp_net_rx()
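The hot-path offset math above (lines 1878-1882) decides where the frame and its prepended metadata sit inside the frag. A runnable model follows; the headroom constant is an assumed stand-in, and the meta_off derivation (pkt_off minus meta_len) is reconstructed from context since that line does not itself mention dp:

#include <stdio.h>

#define NFP_NET_RX_BUF_HEADROOM         64      /* assumed stand-in */
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC   0

int main(void)
{
        /* Assumed example: dynamic RX offset, 8 bytes of metadata. */
        unsigned int rx_dma_off = 0;
        unsigned int rx_offset = NFP_NET_CFG_RX_OFFSET_DYNAMIC;
        unsigned int meta_len = 8, pkt_off, meta_off;

        pkt_off = NFP_NET_RX_BUF_HEADROOM + rx_dma_off;
        if (rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                pkt_off += meta_len;    /* metadata directly precedes data */
        else
                pkt_off += rx_offset;   /* fixed prepend area */
        meta_off = pkt_off - meta_len;

        printf("packet at frag+%u, metadata at frag+%u\n",
               pkt_off, meta_off);
        return 0;
}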
2114 struct nfp_net_dp *dp; in nfp_ctrl_tx_one() local
2118 dp = &r_vec->nfp_net->dp; in nfp_ctrl_tx_one()
2122 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n"); in nfp_ctrl_tx_one()
2139 nn_dp_warn(dp, "CTRL TX on skb without headroom\n"); in nfp_ctrl_tx_one()
2148 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), in nfp_ctrl_tx_one()
2150 if (dma_mapping_error(dp->dev, dma_addr)) in nfp_ctrl_tx_one()
2181 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n"); in nfp_ctrl_tx_one()
2237 nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp, in nfp_ctrl_rx_one() argument
2266 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; in nfp_ctrl_rx_one()
2267 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_ctrl_rx_one()
2270 pkt_off += dp->rx_offset; in nfp_ctrl_rx_one()
2279 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len); in nfp_ctrl_rx_one()
2282 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n", in nfp_ctrl_rx_one()
2284 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); in nfp_ctrl_rx_one()
2288 skb = build_skb(rxbuf->frag, dp->fl_bufsz); in nfp_ctrl_rx_one()
2290 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); in nfp_ctrl_rx_one()
2293 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); in nfp_ctrl_rx_one()
2295 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); in nfp_ctrl_rx_one()
2299 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); in nfp_ctrl_rx_one()
2301 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); in nfp_ctrl_rx_one()
2315 struct nfp_net_dp *dp = &nn->dp; in nfp_ctrl_rx() local
2318 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--) in nfp_ctrl_rx()
2337 nn_dp_warn(&r_vec->nfp_net->dp, in nfp_ctrl_poll()
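The control-path poller at line 2318 bounds its work the NAPI way: keep pulling frames until the ring is empty or the budget runs out. A runnable model of that loop shape, with a stub standing in for nfp_ctrl_rx_one():

#include <stdio.h>

static int pending = 5;                 /* pretend 5 frames are queued */

/* Stand-in for nfp_ctrl_rx_one(): returns nonzero while work remains. */
static int rx_one(void)
{
        return pending ? pending-- : 0;
}

int main(void)
{
        int budget = 3;

        /* Same shape as nfp_ctrl_rx(): stop when the ring runs dry or
         * the budget is spent, whichever comes first. */
        while (rx_one() && budget--)
                ;

        printf("%d frame(s) left for the next poll\n", pending);
        return 0;
}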
2367 if (nn->dp.netdev) { in nfp_net_vecs_init()
2389 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_tx_ring_free() local
2394 dma_free_coherent(dp->dev, tx_ring->size, in nfp_net_tx_ring_free()
2406 * @dp: NFP Net data path struct
2412 nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) in nfp_net_tx_ring_alloc() argument
2416 tx_ring->cnt = dp->txd_cnt; in nfp_net_tx_ring_alloc()
2419 tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size, in nfp_net_tx_ring_alloc()
2423 …netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count:… in nfp_net_tx_ring_alloc()
2433 if (!tx_ring->is_xdp && dp->netdev) in nfp_net_tx_ring_alloc()
2434 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask, in nfp_net_tx_ring_alloc()
2445 nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp, in nfp_net_tx_ring_bufs_free() argument
2457 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr); in nfp_net_tx_ring_bufs_free()
2463 nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp, in nfp_net_tx_ring_bufs_alloc() argument
2473 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr); in nfp_net_tx_ring_bufs_alloc()
2475 nfp_net_tx_ring_bufs_free(dp, tx_ring); in nfp_net_tx_ring_bufs_alloc()
2483 static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_tx_rings_prepare() argument
2487 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), in nfp_net_tx_rings_prepare()
2489 if (!dp->tx_rings) in nfp_net_tx_rings_prepare()
2492 for (r = 0; r < dp->num_tx_rings; r++) { in nfp_net_tx_rings_prepare()
2495 if (r >= dp->num_stack_tx_rings) in nfp_net_tx_rings_prepare()
2496 bias = dp->num_stack_tx_rings; in nfp_net_tx_rings_prepare()
2498 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias], in nfp_net_tx_rings_prepare()
2501 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r])) in nfp_net_tx_rings_prepare()
2504 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r])) in nfp_net_tx_rings_prepare()
2512 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_prepare()
2514 nfp_net_tx_ring_free(&dp->tx_rings[r]); in nfp_net_tx_rings_prepare()
2516 kfree(dp->tx_rings); in nfp_net_tx_rings_prepare()
2520 static void nfp_net_tx_rings_free(struct nfp_net_dp *dp) in nfp_net_tx_rings_free() argument
2524 for (r = 0; r < dp->num_tx_rings; r++) { in nfp_net_tx_rings_free()
2525 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_free()
2526 nfp_net_tx_ring_free(&dp->tx_rings[r]); in nfp_net_tx_rings_free()
2529 kfree(dp->tx_rings); in nfp_net_tx_rings_free()
2539 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_rx_ring_free() local
2541 if (dp->netdev) in nfp_net_rx_ring_free()
2546 dma_free_coherent(dp->dev, rx_ring->size, in nfp_net_rx_ring_free()
2558 * @dp: NFP Net data path struct
2564 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) in nfp_net_rx_ring_alloc() argument
2568 if (dp->netdev) { in nfp_net_rx_ring_alloc()
2569 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, in nfp_net_rx_ring_alloc()
2575 rx_ring->cnt = dp->rxd_cnt; in nfp_net_rx_ring_alloc()
2577 rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, in nfp_net_rx_ring_alloc()
2581 …netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count:… in nfp_net_rx_ring_alloc()
2598 static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_rx_rings_prepare() argument
2602 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings), in nfp_net_rx_rings_prepare()
2604 if (!dp->rx_rings) in nfp_net_rx_rings_prepare()
2607 for (r = 0; r < dp->num_rx_rings; r++) { in nfp_net_rx_rings_prepare()
2608 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); in nfp_net_rx_rings_prepare()
2610 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r])) in nfp_net_rx_rings_prepare()
2613 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r])) in nfp_net_rx_rings_prepare()
2621 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); in nfp_net_rx_rings_prepare()
2623 nfp_net_rx_ring_free(&dp->rx_rings[r]); in nfp_net_rx_rings_prepare()
2625 kfree(dp->rx_rings); in nfp_net_rx_rings_prepare()
2629 static void nfp_net_rx_rings_free(struct nfp_net_dp *dp) in nfp_net_rx_rings_free() argument
2633 for (r = 0; r < dp->num_rx_rings; r++) { in nfp_net_rx_rings_free()
2634 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); in nfp_net_rx_rings_free()
2635 nfp_net_rx_ring_free(&dp->rx_rings[r]); in nfp_net_rx_rings_free()
2638 kfree(dp->rx_rings); in nfp_net_rx_rings_free()
2642 nfp_net_vector_assign_rings(struct nfp_net_dp *dp, in nfp_net_vector_assign_rings() argument
2645 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL; in nfp_net_vector_assign_rings()
2647 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL; in nfp_net_vector_assign_rings()
2649 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? in nfp_net_vector_assign_rings()
2650 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; in nfp_net_vector_assign_rings()
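nfp_net_vector_assign_rings() hands each interrupt vector at most one RX ring, one stack TX ring, and one XDP TX ring; the XDP rings are not a separate array but the tail of tx_rings[], starting at index num_stack_tx_rings. A runnable model of the index math with assumed counts:

#include <stdio.h>

int main(void)
{
        /* Assumed example: 4 RX rings, 4 stack TX rings plus 4 XDP
         * TX rings stacked after them in the same tx_rings[] array. */
        int num_rx_rings = 4, num_stack_tx_rings = 4, num_tx_rings = 8;
        int idx;

        for (idx = 0; idx < 4; idx++) {
                printf("vec %d: rx_ring[%d] tx_ring[%d]", idx,
                       idx < num_rx_rings ? idx : -1,
                       idx < num_stack_tx_rings ? idx : -1);
                if (idx < num_tx_rings - num_stack_tx_rings)
                        printf(" xdp_ring = tx_rings[%d]",
                               num_stack_tx_rings + idx);
                printf("\n");
        }
        return 0;
}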
2660 if (nn->dp.netdev) in nfp_net_prepare_vector()
2661 netif_napi_add(nn->dp.netdev, &r_vec->napi, in nfp_net_prepare_vector()
2671 if (nn->dp.netdev) in nfp_net_prepare_vector()
2693 if (nn->dp.netdev) in nfp_net_cleanup_vector()
2746 for (i = 0; i < nn->dp.num_rx_rings; i++) in nfp_net_coalesce_write_cfg()
2752 for (i = 0; i < nn->dp.num_tx_rings; i++) in nfp_net_coalesce_write_cfg()
2794 new_ctrl = nn->dp.ctrl; in nfp_net_clear_config_and_disable()
2811 for (r = 0; r < nn->dp.num_rx_rings; r++) in nfp_net_clear_config_and_disable()
2812 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); in nfp_net_clear_config_and_disable()
2813 for (r = 0; r < nn->dp.num_tx_rings; r++) in nfp_net_clear_config_and_disable()
2814 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); in nfp_net_clear_config_and_disable()
2815 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_net_clear_config_and_disable()
2818 nn->dp.ctrl = new_ctrl; in nfp_net_clear_config_and_disable()
2850 new_ctrl = nn->dp.ctrl; in nfp_net_set_config_and_enable()
2852 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) { in nfp_net_set_config_and_enable()
2859 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) { in nfp_net_set_config_and_enable()
2864 for (r = 0; r < nn->dp.num_tx_rings; r++) in nfp_net_set_config_and_enable()
2865 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r); in nfp_net_set_config_and_enable()
2866 for (r = 0; r < nn->dp.num_rx_rings; r++) in nfp_net_set_config_and_enable()
2867 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r); in nfp_net_set_config_and_enable()
2869 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ? in nfp_net_set_config_and_enable()
2870 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1); in nfp_net_set_config_and_enable()
2872 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ? in nfp_net_set_config_and_enable()
2873 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1); in nfp_net_set_config_and_enable()
2875 if (nn->dp.netdev) in nfp_net_set_config_and_enable()
2876 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); in nfp_net_set_config_and_enable()
2878 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu); in nfp_net_set_config_and_enable()
2880 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA; in nfp_net_set_config_and_enable()
2898 nn->dp.ctrl = new_ctrl; in nfp_net_set_config_and_enable()
2900 for (r = 0; r < nn->dp.num_rx_rings; r++) in nfp_net_set_config_and_enable()
2901 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); in nfp_net_set_config_and_enable()
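The enable masks written at lines 2869-2873 set one bit per active ring. The 64-ring case is special-cased because shifting a 64-bit value by 64 is undefined behaviour in C, so ((u64)1 << 64) - 1 cannot be relied on to produce all-ones. A runnable check:

#include <stdio.h>
#include <stdint.h>

/* Rings 0..n-1 enabled; n == 64 must bypass the shift entirely. */
static uint64_t ring_mask(unsigned int n)
{
        return n == 64 ? 0xffffffffffffffffULL : ((uint64_t)1 << n) - 1;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)ring_mask(8));  /* 0xff */
        printf("%#llx\n", (unsigned long long)ring_mask(64)); /* all ones */
        return 0;
}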
2916 netif_carrier_off(nn->dp.netdev); in nfp_net_close_stack()
2919 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_close_stack()
2932 netif_tx_disable(nn->dp.netdev); in nfp_net_close_stack()
2943 nfp_net_tx_rings_free(&nn->dp); in nfp_net_close_free_all()
2944 nfp_net_rx_rings_free(&nn->dp); in nfp_net_close_free_all()
2946 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_net_close_free_all()
2984 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_ctrl_close()
3067 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_open_stack()
3084 netif_tx_wake_all_queues(nn->dp.netdev); in nfp_net_open_stack()
3106 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_open_alloc_all()
3112 err = nfp_net_rx_rings_prepare(nn, &nn->dp); in nfp_net_open_alloc_all()
3116 err = nfp_net_tx_rings_prepare(nn, &nn->dp); in nfp_net_open_alloc_all()
3121 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); in nfp_net_open_alloc_all()
3126 nfp_net_rx_rings_free(&nn->dp); in nfp_net_open_alloc_all()
3128 r = nn->dp.num_r_vecs; in nfp_net_open_alloc_all()
3152 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); in nfp_net_netdev_open()
3156 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); in nfp_net_netdev_open()
3208 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_ctrl_open()
3227 new_ctrl = nn->dp.ctrl; in nfp_net_set_rx_mode()
3243 if (new_ctrl == nn->dp.ctrl) in nfp_net_set_rx_mode()
3249 nn->dp.ctrl = new_ctrl; in nfp_net_set_rx_mode()
3258 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings); in nfp_net_rss_init_itbl()
3261 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_dp_swap() argument
3263 struct nfp_net_dp new_dp = *dp; in nfp_net_dp_swap()
3265 *dp = nn->dp; in nfp_net_dp_swap()
3266 nn->dp = new_dp; in nfp_net_dp_swap()
3268 nn->dp.netdev->mtu = new_dp.mtu; in nfp_net_dp_swap()
3270 if (!netif_is_rxfh_configured(nn->dp.netdev)) in nfp_net_dp_swap()
3274 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_dp_swap_enable() argument
3279 nfp_net_dp_swap(nn, dp); in nfp_net_dp_swap_enable()
3282 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); in nfp_net_dp_swap_enable()
3284 err = netif_set_real_num_queues(nn->dp.netdev, in nfp_net_dp_swap_enable()
3285 nn->dp.num_stack_tx_rings, in nfp_net_dp_swap_enable()
3286 nn->dp.num_rx_rings); in nfp_net_dp_swap_enable()
3301 *new = nn->dp; in nfp_net_clone_dp()
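Runtime reconfiguration (MTU, ring counts, XDP) follows a copy-modify-commit pattern: nfp_net_clone_dp() duplicates the live data path, the caller edits only the copy, and nfp_net_ring_reconfig() validates it, builds the new rings, and swaps everything in one step, freeing the copy either way. Here is a user-space model of the pattern, not the driver code; in the driver the commit can also roll back on failure (lines 3376-3392 below):

#include <stdio.h>
#include <stdlib.h>

struct dp { unsigned int mtu, rxd_cnt; };       /* toy data path */

static struct dp live = { 1500, 4096 };

static struct dp *clone_dp(void)
{
        struct dp *new = malloc(sizeof(*new));

        if (new)
                *new = live;            /* like "*new = nn->dp" above */
        return new;
}

static int ring_reconfig(struct dp *new)
{
        live = *new;                    /* commit the whole struct at once */
        free(new);
        return 0;
}

int main(void)
{
        struct dp *new = clone_dp();

        if (!new)
                return 1;
        new->mtu = 9000;                /* mutate only the copy */
        ring_reconfig(new);
        printf("live mtu now %u\n", live.mtu);
        return 0;
}

nfp_net_change_mtu() at lines 3412-3425 is the smallest real user of this pattern: clone, set dp->mtu, hand off to nfp_net_ring_reconfig().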
3314 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, in nfp_net_check_config() argument
3318 if (!dp->xdp_prog) in nfp_net_check_config()
3320 if (dp->fl_bufsz > PAGE_SIZE) { in nfp_net_check_config()
3324 if (dp->num_tx_rings > nn->max_tx_rings) { in nfp_net_check_config()
3332 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp, in nfp_net_ring_reconfig() argument
3337 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp); in nfp_net_ring_reconfig()
3339 dp->num_stack_tx_rings = dp->num_tx_rings; in nfp_net_ring_reconfig()
3340 if (dp->xdp_prog) in nfp_net_ring_reconfig()
3341 dp->num_stack_tx_rings -= dp->num_rx_rings; in nfp_net_ring_reconfig()
3343 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings); in nfp_net_ring_reconfig()
3345 err = nfp_net_check_config(nn, dp, extack); in nfp_net_ring_reconfig()
3349 if (!netif_running(dp->netdev)) { in nfp_net_ring_reconfig()
3350 nfp_net_dp_swap(nn, dp); in nfp_net_ring_reconfig()
3356 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) { in nfp_net_ring_reconfig()
3359 dp->num_r_vecs = r; in nfp_net_ring_reconfig()
3364 err = nfp_net_rx_rings_prepare(nn, dp); in nfp_net_ring_reconfig()
3368 err = nfp_net_tx_rings_prepare(nn, dp); in nfp_net_ring_reconfig()
3376 err = nfp_net_dp_swap_enable(nn, dp); in nfp_net_ring_reconfig()
3383 err2 = nfp_net_dp_swap_enable(nn, dp); in nfp_net_ring_reconfig()
3388 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) in nfp_net_ring_reconfig()
3391 nfp_net_rx_rings_free(dp); in nfp_net_ring_reconfig()
3392 nfp_net_tx_rings_free(dp); in nfp_net_ring_reconfig()
3396 kfree(dp); in nfp_net_ring_reconfig()
3401 nfp_net_rx_rings_free(dp); in nfp_net_ring_reconfig()
3403 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) in nfp_net_ring_reconfig()
3405 kfree(dp); in nfp_net_ring_reconfig()
3412 struct nfp_net_dp *dp; in nfp_net_change_mtu() local
3419 dp = nfp_net_clone_dp(nn); in nfp_net_change_mtu()
3420 if (!dp) in nfp_net_change_mtu()
3423 dp->mtu = new_mtu; in nfp_net_change_mtu()
3425 return nfp_net_ring_reconfig(nn, dp, NULL); in nfp_net_change_mtu()
3528 new_ctrl = nn->dp.ctrl; in nfp_net_set_features()
3587 if (new_ctrl == nn->dp.ctrl) in nfp_net_set_features()
3590 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl); in nfp_net_set_features()
3596 nn->dp.ctrl = new_ctrl; in nfp_net_set_features()
3662 if (nn->dp.is_vf || nn->vnic_no_name) in nfp_net_get_phys_port_name()
3675 struct nfp_net_dp *dp; in nfp_net_xdp_setup_drv() local
3678 if (!prog == !nn->dp.xdp_prog) { in nfp_net_xdp_setup_drv()
3679 WRITE_ONCE(nn->dp.xdp_prog, prog); in nfp_net_xdp_setup_drv()
3684 dp = nfp_net_clone_dp(nn); in nfp_net_xdp_setup_drv()
3685 if (!dp) in nfp_net_xdp_setup_drv()
3688 dp->xdp_prog = prog; in nfp_net_xdp_setup_drv()
3689 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings; in nfp_net_xdp_setup_drv()
3690 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in nfp_net_xdp_setup_drv()
3691 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; in nfp_net_xdp_setup_drv()
3694 err = nfp_net_ring_reconfig(nn, dp, bpf->extack); in nfp_net_xdp_setup_drv()
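Attaching an XDP program reserves one extra TX ring per RX ring for XDP_TX, flips the RX DMA direction to bidirectional, and shifts the packet start to leave XDP_PACKET_HEADROOM; detaching reverses all three. A runnable model of the ring arithmetic, with assumed starting counts:

#include <stdio.h>

int main(void)
{
        /* Assumed starting point: 8 stack TX rings, 8 RX rings. */
        int num_tx_rings = 8, num_rx_rings = 8, have_prog = 1;
        int num_stack_tx_rings;

        /* One extra TX ring per RX ring while a program is attached. */
        num_tx_rings += have_prog ? num_rx_rings : -num_rx_rings;

        /* nfp_net_ring_reconfig() (lines 3339-3341) then derives the
         * stack-visible count. */
        num_stack_tx_rings = num_tx_rings;
        if (have_prog)
                num_stack_tx_rings -= num_rx_rings;

        printf("tx=%d (stack=%d, xdp=%d)\n", num_tx_rings,
               num_stack_tx_rings, num_tx_rings - num_stack_tx_rings);
        return 0;
}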
3814 nn->dp.is_vf ? "VF " : "", in nfp_net_info()
3815 nn->dp.num_tx_rings, nn->max_tx_rings, in nfp_net_info()
3816 nn->dp.num_rx_rings, nn->max_rx_rings); in nfp_net_info()
3878 nn->dp.netdev = netdev; in nfp_net_alloc()
3885 nn->dp.dev = &pdev->dev; in nfp_net_alloc()
3886 nn->dp.ctrl_bar = ctrl_bar; in nfp_net_alloc()
3892 nn->dp.num_tx_rings = min_t(unsigned int, in nfp_net_alloc()
3894 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings, in nfp_net_alloc()
3897 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); in nfp_net_alloc()
3898 nn->dp.num_r_vecs = min_t(unsigned int, in nfp_net_alloc()
3899 nn->dp.num_r_vecs, num_online_cpus()); in nfp_net_alloc()
3901 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; in nfp_net_alloc()
3902 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; in nfp_net_alloc()
3911 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, in nfp_net_alloc()
3923 if (nn->dp.netdev) in nfp_net_alloc()
3924 free_netdev(nn->dp.netdev); in nfp_net_alloc()
3939 if (nn->dp.netdev) in nfp_net_free()
3940 free_netdev(nn->dp.netdev); in nfp_net_free()
3984 dev_warn(nn->dp.dev, in nfp_net_rss_init()
4018 struct net_device *netdev = nn->dp.netdev; in nfp_net_netdev_init()
4020 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); in nfp_net_netdev_init()
4022 netdev->mtu = nn->dp.mtu; in nfp_net_netdev_init()
4036 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY; in nfp_net_netdev_init()
4040 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM; in nfp_net_netdev_init()
4044 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER; in nfp_net_netdev_init()
4049 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: in nfp_net_netdev_init()
4058 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; in nfp_net_netdev_init()
4063 nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; in nfp_net_netdev_init()
4072 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN; in nfp_net_netdev_init()
4079 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN; in nfp_net_netdev_init()
4084 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER; in nfp_net_netdev_init()
4094 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY; in nfp_net_netdev_init()
4121 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 || in nfp_net_read_caps()
4122 !nn->dp.netdev || in nfp_net_read_caps()
4128 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4) in nfp_net_read_caps()
4140 nn->dp.rx_offset = reg; in nfp_net_read_caps()
4142 nn->dp.rx_offset = NFP_NET_RX_OFFSET; in nfp_net_read_caps()
4146 if (!nn->dp.netdev) in nfp_net_read_caps()
4162 nn->dp.rx_dma_dir = DMA_FROM_DEVICE; in nfp_net_init()
4170 nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu); in nfp_net_init()
4172 nn->dp.mtu = nn->max_mtu; in nfp_net_init()
4174 nn->dp.mtu = NFP_NET_DEFAULT_MTU; in nfp_net_init()
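Lines 4170-4174 pick the initial MTU through a fallback chain; the guarding conditions do not mention dp and so are absent from this listing. A sketch of the chain as reconstructed from context (the branch conditions here are assumptions, not matched lines):

#include <stdio.h>

#define NFP_NET_DEFAULT_MTU 1500U       /* assumed default */

static unsigned int pick_mtu(unsigned int ctrl_mtu, unsigned int max_mtu)
{
        if (ctrl_mtu)                   /* control vNIC: app's choice */
                return ctrl_mtu < max_mtu ? ctrl_mtu : max_mtu; /* min() */
        if (max_mtu < NFP_NET_DEFAULT_MTU)
                return max_mtu;         /* hardware can't fit the default */
        return NFP_NET_DEFAULT_MTU;
}

int main(void)
{
        printf("%u\n", pick_mtu(0, 9000));      /* -> 1500 */
        printf("%u\n", pick_mtu(9216, 9000));   /* -> 9000 (capped) */
        return 0;
}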
4176 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); in nfp_net_init()
4179 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA; in nfp_net_init()
4183 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?: in nfp_net_init()
4189 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; in nfp_net_init()
4194 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; in nfp_net_init()
4209 if (nn->dp.netdev) { in nfp_net_init()
4223 if (!nn->dp.netdev) in nfp_net_init()
4225 return register_netdev(nn->dp.netdev); in nfp_net_init()
4238 if (!nn->dp.netdev) in nfp_net_clean()
4241 unregister_netdev(nn->dp.netdev); in nfp_net_clean()