Lines Matching +full:lo +full:- +full:x2 +full:- +full:en
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
16 int num_tx_rings = priv->num_tx_rings; in enetc_num_stack_tx_queues()
19 for (i = 0; i < priv->num_rx_rings; i++) in enetc_num_stack_tx_queues()
20 if (priv->rx_ring[i]->xdp.prog) in enetc_num_stack_tx_queues()
21 return num_tx_rings - num_possible_cpus(); in enetc_num_stack_tx_queues()
29 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; in enetc_rx_ring_from_xdp_tx_ring()
31 return priv->rx_ring[index]; in enetc_rx_ring_from_xdp_tx_ring()
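The enetc_num_stack_tx_queues() and enetc_rx_ring_from_xdp_tx_ring() fragments above rely on a fixed layout: when XDP is enabled, the last num_possible_cpus() TX rings are reserved for XDP, and the pointer difference against priv->xdp_tx_ring recovers which per-CPU XDP TX ring (and hence RX ring) a given TX ring is. A minimal userspace sketch of that arithmetic; the ring counts and the struct here are made up for illustration, this is not driver code.

#include <assert.h>
#include <stdio.h>

#define NUM_TX_RINGS 8
#define NUM_CPUS     2   /* stands in for num_possible_cpus() */

struct ring { int index; };

int main(void)
{
	struct ring tx_ring[NUM_TX_RINGS];
	struct ring *xdp_tx_ring = &tx_ring[NUM_TX_RINGS - NUM_CPUS];
	int stack_queues = NUM_TX_RINGS - NUM_CPUS;

	for (int i = 0; i < NUM_TX_RINGS; i++)
		tx_ring[i].index = i;

	/* TX ring 7 is the second XDP TX ring, so it pairs with RX ring 1 */
	int rx_index = &tx_ring[tx_ring[7].index] - xdp_tx_ring;

	printf("stack queues: %d, rx ring for tx ring 7: %d\n",
	       stack_queues, rx_index);
	assert(rx_index == 1);
	return 0;
}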
36 if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect) in enetc_tx_swbd_get_skb()
39 return tx_swbd->skb; in enetc_tx_swbd_get_skb()
45 if (tx_swbd->is_xdp_redirect) in enetc_tx_swbd_get_xdp_frame()
46 return tx_swbd->xdp_frame; in enetc_tx_swbd_get_xdp_frame()
58 if (tx_swbd->is_dma_page) in enetc_unmap_tx_buff()
59 dma_unmap_page(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
60 tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len, in enetc_unmap_tx_buff()
61 tx_swbd->dir); in enetc_unmap_tx_buff()
63 dma_unmap_single(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
64 tx_swbd->len, tx_swbd->dir); in enetc_unmap_tx_buff()
65 tx_swbd->dma = 0; in enetc_unmap_tx_buff()
74 if (tx_swbd->dma) in enetc_free_tx_frame()
78 xdp_return_frame(tx_swbd->xdp_frame); in enetc_free_tx_frame()
79 tx_swbd->xdp_frame = NULL; in enetc_free_tx_frame()
82 tx_swbd->skb = NULL; in enetc_free_tx_frame()
90 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); in enetc_update_tx_ring_tail()
104 return -EINVAL; in enetc_ptp_parse()
108 return -EINVAL; in enetc_ptp_parse()
117 *twostep = hdr->flag_field[0] & 0x2; in enetc_ptp_parse()
120 *correction_offset = (u8 *)&hdr->correction - base; in enetc_ptp_parse()
121 *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; in enetc_ptp_parse()
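The enetc_ptp_parse() fragment above tests the PTP twoStepFlag (bit 1 of the first flag byte) and records field offsets for later one-step timestamp patching. A hedged sketch of that arithmetic with a simplified stand-in header; the layout below is an assumption, and the real driver measures offsets from the packet start (skb data), not from the PTP header alone.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_ptp_header {		/* simplified, not the kernel's ptp_header */
	uint8_t  tsmt;
	uint8_t  ver;
	uint16_t message_length;
	uint8_t  domain_number;
	uint8_t  reserved1;
	uint8_t  flag_field[2];
	uint64_t correction;
} __attribute__((packed));

int main(void)
{
	struct fake_ptp_header hdr = { .flag_field = { 0x02, 0x00 } };
	int twostep = hdr.flag_field[0] & 0x2;

	printf("twostep=%d correction_offset=%zu body_offset=%zu\n",
	       twostep, offsetof(struct fake_ptp_header, correction),
	       sizeof(struct fake_ptp_header));
	return 0;
}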
129 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_map_tx_buffs()
130 struct enetc_hw *hw = &priv->si->hw; in enetc_map_tx_buffs()
143 i = tx_ring->next_to_use; in enetc_map_tx_buffs()
147 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); in enetc_map_tx_buffs()
148 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in enetc_map_tx_buffs()
155 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
156 tx_swbd->dma = dma; in enetc_map_tx_buffs()
157 tx_swbd->len = len; in enetc_map_tx_buffs()
158 tx_swbd->is_dma_page = 0; in enetc_map_tx_buffs()
159 tx_swbd->dir = DMA_TO_DEVICE; in enetc_map_tx_buffs()
163 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { in enetc_map_tx_buffs()
167 WARN_ONCE(1, "Bad packet for one-step timestamping\n"); in enetc_map_tx_buffs()
170 } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) { in enetc_map_tx_buffs()
174 tx_swbd->do_twostep_tstamp = do_twostep_tstamp; in enetc_map_tx_buffs()
175 tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV); in enetc_map_tx_buffs()
176 tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en; in enetc_map_tx_buffs()
181 if (tx_ring->tsd_enable) in enetc_map_tx_buffs()
185 temp_bd.frm_len = cpu_to_le16(skb->len); in enetc_map_tx_buffs()
189 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns, in enetc_map_tx_buffs()
202 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
204 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
211 temp_bd.ext.tpid = 0; /* < C-TAG */ in enetc_map_tx_buffs()
216 u32 lo, hi, val; in enetc_map_tx_buffs() local
220 lo = enetc_rd_hot(hw, ENETC_SICTR0); in enetc_map_tx_buffs()
222 sec = (u64)hi << 32 | lo; in enetc_map_tx_buffs()
226 temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff); in enetc_map_tx_buffs()
230 * - 48 bits seconds field in enetc_map_tx_buffs()
231 * - 32 bits nanoseconds field in enetc_map_tx_buffs()
240 /* Configure single-step register */ in enetc_map_tx_buffs()
249 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in enetc_map_tx_buffs()
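The one-step path above reads the 1588 timer as a hi/lo register pair and rewrites the Sync packet's originTimestamp, which the comment describes as 48 bits of seconds plus 32 bits of nanoseconds. A rough userspace sketch of that split, assuming the combined counter is a plain 64-bit nanosecond count (register semantics simplified, values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x1dcd6500, hi = 0x00000002;      /* example counter read */
	uint64_t ns = (uint64_t)hi << 32 | lo;

	uint64_t sec  = ns / 1000000000ULL;             /* only 48 bits used */
	uint32_t nsec = (uint32_t)(ns % 1000000000ULL); /* 32-bit nanoseconds */

	uint16_t sec_hi = (uint16_t)(sec >> 32);        /* upper 16 of the 48 */
	uint32_t sec_lo = (uint32_t)sec;                /* lower 32 of the 48 */

	printf("sec=%llu sec_hi=%u sec_lo=%u nsec=%u\n",
	       (unsigned long long)sec, (unsigned)sec_hi,
	       (unsigned)sec_lo, (unsigned)nsec);
	return 0;
}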
257 frag = &skb_shinfo(skb)->frags[0]; in enetc_map_tx_buffs()
258 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { in enetc_map_tx_buffs()
260 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, in enetc_map_tx_buffs()
262 if (dma_mapping_error(tx_ring->dev, dma)) in enetc_map_tx_buffs()
272 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
274 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
282 tx_swbd->dma = dma; in enetc_map_tx_buffs()
283 tx_swbd->len = len; in enetc_map_tx_buffs()
284 tx_swbd->is_dma_page = 1; in enetc_map_tx_buffs()
285 tx_swbd->dir = DMA_TO_DEVICE; in enetc_map_tx_buffs()
294 tx_ring->tx_swbd[i].is_eof = true; in enetc_map_tx_buffs()
295 tx_ring->tx_swbd[i].skb = skb; in enetc_map_tx_buffs()
298 tx_ring->next_to_use = i; in enetc_map_tx_buffs()
307 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_buffs()
310 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
313 i = tx_ring->bd_count; in enetc_map_tx_buffs()
314 i--; in enetc_map_tx_buffs()
315 } while (count--); in enetc_map_tx_buffs()
330 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; in enetc_map_tx_tso_hdr()
346 tx_swbd->len = hdr_len; in enetc_map_tx_tso_hdr()
347 tx_swbd->do_twostep_tstamp = false; in enetc_map_tx_tso_hdr()
348 tx_swbd->check_wb = false; in enetc_map_tx_tso_hdr()
358 tx_swbd = &tx_ring->tx_swbd[*i]; in enetc_map_tx_tso_hdr()
364 txbd_tmp.ext.tpid = 0; /* < C-TAG */ in enetc_map_tx_tso_hdr()
384 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); in enetc_map_tx_tso_data()
385 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { in enetc_map_tx_tso_data()
386 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_map_tx_tso_data()
387 return -ENOMEM; in enetc_map_tx_tso_data()
392 tx_swbd->is_eof = 1; in enetc_map_tx_tso_data()
399 tx_swbd->dma = addr; in enetc_map_tx_tso_data()
400 tx_swbd->len = size; in enetc_map_tx_tso_data()
401 tx_swbd->dir = DMA_TO_DEVICE; in enetc_map_tx_tso_data()
414 if (tso->tlen != sizeof(struct udphdr)) { in enetc_tso_hdr_csum()
417 tcph->check = 0; in enetc_tso_hdr_csum()
421 udph->check = 0; in enetc_tso_hdr_csum()
427 if (!tso->ipv6) { in enetc_tso_hdr_csum()
430 iph->check = 0; in enetc_tso_hdr_csum()
431 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); in enetc_tso_hdr_csum()
435 *l4_hdr_len = hdr_len - skb_transport_offset(skb); in enetc_tso_hdr_csum()
446 /* Complete the L4 checksum by appending the pseudo-header to the in enetc_tso_complete_csum()
449 if (!tso->ipv6) in enetc_tso_complete_csum()
450 csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr, in enetc_tso_complete_csum()
451 ip_hdr(skb)->daddr, in enetc_tso_complete_csum()
452 len, ip_hdr(skb)->protocol, sum); in enetc_tso_complete_csum()
454 csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in enetc_tso_complete_csum()
455 &ipv6_hdr(skb)->daddr, in enetc_tso_complete_csum()
456 len, ipv6_hdr(skb)->nexthdr, sum); in enetc_tso_complete_csum()
458 if (tso->tlen != sizeof(struct udphdr)) { in enetc_tso_complete_csum()
461 tcph->check = csum_final; in enetc_tso_complete_csum()
465 udph->check = csum_final; in enetc_tso_complete_csum()
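enetc_tso_complete_csum() above finishes the software-computed L4 checksum by folding in the IPv4 or IPv6 pseudo-header via csum_tcpudp_magic()/csum_ipv6_magic(). A rough userspace stand-in for the IPv4 case, with invented field values, showing what "appending the pseudo-header" means in one's-complement arithmetic (this substitutes for, and is not, the kernel helpers):

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t saddr = 0xc0a80001, daddr = 0xc0a80002; /* 192.168.0.1 / .2 */
	uint32_t len = 40, proto = 6;                    /* TCP, 40-byte segment */
	uint32_t sum = 0x1234;                           /* partial checksum over data */

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto + len;

	printf("final checksum: 0x%04x\n", (unsigned)fold(sum));
	return 0;
}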
481 total_len = skb->len - hdr_len; in enetc_map_tx_tso_buffs()
482 i = tx_ring->next_to_use; in enetc_map_tx_tso_buffs()
489 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
493 data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len); in enetc_map_tx_tso_buffs()
494 total_len -= data_len; in enetc_map_tx_tso_buffs()
497 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; in enetc_map_tx_tso_buffs()
514 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
531 data_len -= size; in enetc_map_tx_tso_buffs()
543 tx_swbd->skb = skb; in enetc_map_tx_tso_buffs()
549 tx_ring->next_to_use = i; in enetc_map_tx_tso_buffs()
555 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_tso_buffs()
559 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
562 i = tx_ring->bd_count; in enetc_map_tx_tso_buffs()
563 i--; in enetc_map_tx_tso_buffs()
564 } while (count--); in enetc_map_tx_tso_buffs()
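The TSO mapping loop above carves the payload into gso_size-sized segments, each preceded by a replicated header built in the per-ring tso_headers area. A small sketch of just the segmentation arithmetic, with the packet length, header length and MSS invented for illustration:

#include <stdio.h>

int main(void)
{
	int skb_len = 7340, hdr_len = 54, gso_size = 1448;
	int total_len = skb_len - hdr_len;
	int seg = 0;

	while (total_len > 0) {
		int data_len = gso_size < total_len ? gso_size : total_len;

		total_len -= data_len;
		printf("segment %d: %d header + %d payload bytes\n",
		       seg++, hdr_len, data_len);
	}
	return 0;
}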
576 /* Queue one-step Sync packet if already locked */ in enetc_start_xmit()
577 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { in enetc_start_xmit()
579 &priv->flags)) { in enetc_start_xmit()
580 skb_queue_tail(&priv->tx_skbs, skb); in enetc_start_xmit()
585 tx_ring = priv->tx_ring[skb->queue_mapping]; in enetc_start_xmit()
589 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
597 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) in enetc_start_xmit()
601 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ in enetc_start_xmit()
603 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
607 if (skb->ip_summed == CHECKSUM_PARTIAL) { in enetc_start_xmit()
621 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
636 /* Mark tx timestamp type on skb->cb[0] if required */ in enetc_xmit()
637 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in enetc_xmit()
638 (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) { in enetc_xmit()
639 skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK; in enetc_xmit()
641 skb->cb[0] = 0; in enetc_xmit()
644 /* Fall back to two-step timestamp if not one-step Sync packet */ in enetc_xmit()
645 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { in enetc_xmit()
649 skb->cb[0] = ENETC_F_TX_TSTAMP; in enetc_xmit()
663 enetc_wr_reg_hot(v->rbier, 0); in enetc_msix()
664 enetc_wr_reg_hot(v->ricr1, v->rx_ictt); in enetc_msix()
666 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) in enetc_msix()
667 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0); in enetc_msix()
671 napi_schedule(&v->napi); in enetc_msix()
680 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in enetc_rx_dim_work()
684 v->rx_ictt = enetc_usecs_to_cycles(moder.usec); in enetc_rx_dim_work()
685 dim->state = DIM_START_MEASURE; in enetc_rx_dim_work()
692 v->comp_cnt++; in enetc_rx_net_dim()
694 if (!v->rx_napi_work) in enetc_rx_net_dim()
697 dim_update_sample(v->comp_cnt, in enetc_rx_net_dim()
698 v->rx_ring.stats.packets, in enetc_rx_net_dim()
699 v->rx_ring.stats.bytes, in enetc_rx_net_dim()
701 net_dim(&v->rx_dim, dim_sample); in enetc_rx_net_dim()
706 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; in enetc_bd_ready_count()
708 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; in enetc_bd_ready_count()
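enetc_bd_ready_count() above computes how many BDs the hardware has completed between the consumer index and the producer index, handling producer wrap-around. A worked example of that expression:

#include <assert.h>
#include <stdio.h>

static int ready_count(int pi, int ci, int bd_count)
{
	return pi >= ci ? pi - ci : bd_count - ci + pi;
}

int main(void)
{
	assert(ready_count(10, 4, 256) == 6);   /* no wrap */
	assert(ready_count(3, 250, 256) == 9);  /* producer wrapped around */
	printf("ok\n");
	return 0;
}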
721 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; in enetc_reuse_page()
724 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); in enetc_reuse_page()
733 u32 lo, hi, tstamp_lo; in enetc_get_tx_tstamp() local
735 lo = enetc_rd_hot(hw, ENETC_SICTR0); in enetc_get_tx_tstamp()
737 tstamp_lo = le32_to_cpu(txbd->wb.tstamp); in enetc_get_tx_tstamp()
738 if (lo <= tstamp_lo) in enetc_get_tx_tstamp()
739 hi -= 1; in enetc_get_tx_tstamp()
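enetc_get_tx_tstamp() above (and enetc_get_rx_tstamp() further down) widens the 32-bit timestamp latched in the BD using the current SICTR1 high word, decrementing it when the live low word has already wrapped past the latched value. A standalone sketch of that reconstruction, with example values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t expand_tstamp(uint32_t cur_hi, uint32_t cur_lo,
			      uint32_t tstamp_lo)
{
	uint32_t hi = cur_hi;

	if (cur_lo <= tstamp_lo)	/* counter wrapped since the event */
		hi -= 1;
	return (uint64_t)hi << 32 | tstamp_lo;
}

int main(void)
{
	printf("0x%" PRIx64 "\n", expand_tstamp(0x11, 0x00000005, 0xfffffff0));
	return 0;
}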
747 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { in enetc_tstamp_tx()
758 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_recycle_xdp_tx_buff()
760 .dma = tx_swbd->dma, in enetc_recycle_xdp_tx_buff()
761 .page = tx_swbd->page, in enetc_recycle_xdp_tx_buff()
762 .page_offset = tx_swbd->page_offset, in enetc_recycle_xdp_tx_buff()
763 .dir = tx_swbd->dir, in enetc_recycle_xdp_tx_buff()
764 .len = tx_swbd->len, in enetc_recycle_xdp_tx_buff()
774 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, in enetc_recycle_xdp_tx_buff()
779 rx_ring->stats.recycles++; in enetc_recycle_xdp_tx_buff()
784 rx_ring->stats.recycle_failures++; in enetc_recycle_xdp_tx_buff()
786 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, in enetc_recycle_xdp_tx_buff()
791 rx_ring->xdp.xdp_tx_in_flight--; in enetc_recycle_xdp_tx_buff()
797 struct net_device *ndev = tx_ring->ndev; in enetc_clean_tx_ring()
804 i = tx_ring->next_to_clean; in enetc_clean_tx_ring()
805 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_clean_tx_ring()
814 bool is_eof = tx_swbd->is_eof; in enetc_clean_tx_ring()
816 if (unlikely(tx_swbd->check_wb)) { in enetc_clean_tx_ring()
819 if (txbd->flags & ENETC_TXBD_FLAGS_W && in enetc_clean_tx_ring()
820 tx_swbd->do_twostep_tstamp) { in enetc_clean_tx_ring()
821 enetc_get_tx_tstamp(&priv->si->hw, txbd, in enetc_clean_tx_ring()
826 if (tx_swbd->qbv_en && in enetc_clean_tx_ring()
827 txbd->wb.status & ENETC_TXBD_STATS_WIN) in enetc_clean_tx_ring()
831 if (tx_swbd->is_xdp_tx) in enetc_clean_tx_ring()
833 else if (likely(tx_swbd->dma)) in enetc_clean_tx_ring()
839 if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) { in enetc_clean_tx_ring()
840 /* Start work to release lock for next one-step in enetc_clean_tx_ring()
844 schedule_work(&priv->tx_onestep_tstamp); in enetc_clean_tx_ring()
852 tx_byte_cnt += tx_swbd->len; in enetc_clean_tx_ring()
858 bds_to_clean--; in enetc_clean_tx_ring()
861 if (unlikely(i == tx_ring->bd_count)) { in enetc_clean_tx_ring()
863 tx_swbd = tx_ring->tx_swbd; in enetc_clean_tx_ring()
869 /* re-arm interrupt source */ in enetc_clean_tx_ring()
870 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | in enetc_clean_tx_ring()
871 BIT(16 + tx_ring->index)); in enetc_clean_tx_ring()
878 tx_ring->next_to_clean = i; in enetc_clean_tx_ring()
879 tx_ring->stats.packets += tx_frm_cnt; in enetc_clean_tx_ring()
880 tx_ring->stats.bytes += tx_byte_cnt; in enetc_clean_tx_ring()
881 tx_ring->stats.win_drop += tx_win_drop; in enetc_clean_tx_ring()
884 __netif_subqueue_stopped(ndev, tx_ring->index) && in enetc_clean_tx_ring()
886 netif_wake_subqueue(ndev, tx_ring->index); in enetc_clean_tx_ring()
895 bool xdp = !!(rx_ring->xdp.prog); in enetc_new_page()
903 /* For XDP_TX, we forgo dma_unmap -> dma_map */ in enetc_new_page()
904 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in enetc_new_page()
906 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); in enetc_new_page()
907 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { in enetc_new_page()
913 rx_swbd->dma = addr; in enetc_new_page()
914 rx_swbd->page = page; in enetc_new_page()
915 rx_swbd->page_offset = rx_ring->buffer_offset; in enetc_new_page()
926 i = rx_ring->next_to_use; in enetc_refill_rx_ring()
927 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
932 if (unlikely(!rx_swbd->page)) { in enetc_refill_rx_ring()
934 rx_ring->stats.rx_alloc_errs++; in enetc_refill_rx_ring()
940 rxbd->w.addr = cpu_to_le64(rx_swbd->dma + in enetc_refill_rx_ring()
941 rx_swbd->page_offset); in enetc_refill_rx_ring()
943 rxbd->r.lstatus = 0; in enetc_refill_rx_ring()
946 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
950 rx_ring->next_to_alloc = i; /* keep track for page reuse */ in enetc_refill_rx_ring()
951 rx_ring->next_to_use = i; in enetc_refill_rx_ring()
954 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); in enetc_refill_rx_ring()
967 struct enetc_hw *hw = &priv->si->hw; in enetc_get_rx_tstamp()
968 u32 lo, hi, tstamp_lo; in enetc_get_rx_tstamp() local
971 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) { in enetc_get_rx_tstamp()
972 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0); in enetc_get_rx_tstamp()
973 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1); in enetc_get_rx_tstamp()
975 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp); in enetc_get_rx_tstamp()
976 if (lo <= tstamp_lo) in enetc_get_rx_tstamp()
977 hi -= 1; in enetc_get_rx_tstamp()
981 shhwtstamps->hwtstamp = ns_to_ktime(tstamp); in enetc_get_rx_tstamp()
989 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); in enetc_get_offloads()
992 if (rx_ring->ndev->features & NETIF_F_RXCSUM) { in enetc_get_offloads()
993 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); in enetc_get_offloads()
995 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); in enetc_get_offloads()
996 skb->ip_summed = CHECKSUM_COMPLETE; in enetc_get_offloads()
999 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { in enetc_get_offloads()
1002 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { in enetc_get_offloads()
1010 tpid = htons(enetc_port_rd(&priv->si->hw, in enetc_get_offloads()
1014 tpid = htons(enetc_port_rd(&priv->si->hw, in enetc_get_offloads()
1021 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); in enetc_get_offloads()
1025 if (priv->active_offloads & ENETC_F_RX_TSTAMP) in enetc_get_offloads()
1026 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); in enetc_get_offloads()
1030 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
1037 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_get_rx_buff()
1039 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, in enetc_get_rx_buff()
1040 rx_swbd->page_offset, in enetc_get_rx_buff()
1041 size, rx_swbd->dir); in enetc_get_rx_buff()
1045 /* Reuse the current page without performing half-page buffer flipping */
1049 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; in enetc_put_rx_buff()
1053 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, in enetc_put_rx_buff()
1054 rx_swbd->page_offset, in enetc_put_rx_buff()
1055 buffer_size, rx_swbd->dir); in enetc_put_rx_buff()
1057 rx_swbd->page = NULL; in enetc_put_rx_buff()
1060 /* Reuse the current page by performing half-page buffer flipping */
1064 if (likely(enetc_page_reusable(rx_swbd->page))) { in enetc_flip_rx_buff()
1065 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; in enetc_flip_rx_buff()
1066 page_ref_inc(rx_swbd->page); in enetc_flip_rx_buff()
1070 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_flip_rx_buff()
1071 rx_swbd->dir); in enetc_flip_rx_buff()
1072 rx_swbd->page = NULL; in enetc_flip_rx_buff()
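The two helpers above implement half-page buffer recycling: a page is split into two halves of ENETC_RXB_TRUESIZE bytes, and flipping is an XOR of the offset, so the hardware can refill one half while the stack still holds a reference to the other. A toy illustration, assuming a 2048-byte truesize:

#include <stdio.h>

#define RXB_TRUESIZE 2048

int main(void)
{
	unsigned int page_offset = 0;

	for (int i = 0; i < 4; i++) {
		printf("refill %d uses offset %u\n", i, page_offset);
		page_offset ^= RXB_TRUESIZE;	/* 0 -> 2048 -> 0 -> ... */
	}
	return 0;
}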
1083 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_skb()
1084 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); in enetc_map_rx_buff_to_skb()
1086 rx_ring->stats.rx_alloc_errs++; in enetc_map_rx_buff_to_skb()
1090 skb_reserve(skb, rx_ring->buffer_offset); in enetc_map_rx_buff_to_skb()
1103 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, in enetc_add_rx_buff_to_skb()
1104 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); in enetc_add_rx_buff_to_skb()
1116 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1121 bd_status = le32_to_cpu((*rxbd)->r.lstatus); in enetc_check_bd_errors_and_consume()
1123 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1127 rx_ring->ndev->stats.rx_dropped++; in enetc_check_bd_errors_and_consume()
1128 rx_ring->ndev->stats.rx_errors++; in enetc_check_bd_errors_and_consume()
1140 size = le16_to_cpu((*rxbd)->r.buf_len); in enetc_build_skb()
1153 bd_status = le32_to_cpu((*rxbd)->r.lstatus); in enetc_build_skb()
1158 size = le16_to_cpu((*rxbd)->r.buf_len); in enetc_build_skb()
1168 skb_record_rx_queue(skb, rx_ring->index); in enetc_build_skb()
1169 skb->protocol = eth_type_trans(skb, rx_ring->ndev); in enetc_build_skb()
1184 i = rx_ring->next_to_clean; in enetc_clean_rx_ring()
1192 cleaned_cnt -= enetc_refill_rx_ring(rx_ring, in enetc_clean_rx_ring()
1196 bd_status = le32_to_cpu(rxbd->r.lstatus); in enetc_clean_rx_ring()
1200 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); in enetc_clean_rx_ring()
1212 rx_byte_cnt += skb->len; in enetc_clean_rx_ring()
1218 rx_ring->next_to_clean = i; in enetc_clean_rx_ring()
1220 rx_ring->stats.packets += rx_frm_cnt; in enetc_clean_rx_ring()
1221 rx_ring->stats.bytes += rx_byte_cnt; in enetc_clean_rx_ring()
1235 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset); in enetc_xdp_map_tx_buff()
1236 txbd->buf_len = cpu_to_le16(tx_swbd->len); in enetc_xdp_map_tx_buff()
1237 txbd->frm_len = cpu_to_le16(frm_len); in enetc_xdp_map_tx_buff()
1239 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); in enetc_xdp_map_tx_buff()
1249 int i, k, frm_len = tmp_tx_swbd->len; in enetc_xdp_tx()
1254 while (unlikely(!tmp_tx_swbd->is_eof)) { in enetc_xdp_tx()
1256 frm_len += tmp_tx_swbd->len; in enetc_xdp_tx()
1259 i = tx_ring->next_to_use; in enetc_xdp_tx()
1267 if (xdp_tx_swbd->is_eof) { in enetc_xdp_tx()
1270 txbd->flags = ENETC_TXBD_FLAGS_F; in enetc_xdp_tx()
1276 tx_ring->next_to_use = i; in enetc_xdp_tx()
1287 void *data = xdp_frame->data; in enetc_xdp_frame_to_xdp_tx_swbd()
1288 int len = xdp_frame->len; in enetc_xdp_frame_to_xdp_tx_swbd()
1294 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1295 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1296 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1297 return -1; in enetc_xdp_frame_to_xdp_tx_swbd()
1300 xdp_tx_swbd->dma = dma; in enetc_xdp_frame_to_xdp_tx_swbd()
1301 xdp_tx_swbd->dir = DMA_TO_DEVICE; in enetc_xdp_frame_to_xdp_tx_swbd()
1302 xdp_tx_swbd->len = len; in enetc_xdp_frame_to_xdp_tx_swbd()
1303 xdp_tx_swbd->is_xdp_redirect = true; in enetc_xdp_frame_to_xdp_tx_swbd()
1304 xdp_tx_swbd->is_eof = false; in enetc_xdp_frame_to_xdp_tx_swbd()
1305 xdp_tx_swbd->xdp_frame = NULL; in enetc_xdp_frame_to_xdp_tx_swbd()
1312 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; in enetc_xdp_frame_to_xdp_tx_swbd()
1317 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1318 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1320 while (--n >= 0) in enetc_xdp_frame_to_xdp_tx_swbd()
1323 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1324 return -1; in enetc_xdp_frame_to_xdp_tx_swbd()
1327 xdp_tx_swbd->dma = dma; in enetc_xdp_frame_to_xdp_tx_swbd()
1328 xdp_tx_swbd->dir = DMA_TO_DEVICE; in enetc_xdp_frame_to_xdp_tx_swbd()
1329 xdp_tx_swbd->len = len; in enetc_xdp_frame_to_xdp_tx_swbd()
1330 xdp_tx_swbd->is_xdp_redirect = true; in enetc_xdp_frame_to_xdp_tx_swbd()
1331 xdp_tx_swbd->is_eof = false; in enetc_xdp_frame_to_xdp_tx_swbd()
1332 xdp_tx_swbd->xdp_frame = NULL; in enetc_xdp_frame_to_xdp_tx_swbd()
1338 xdp_tx_arr[n - 1].is_eof = true; in enetc_xdp_frame_to_xdp_tx_swbd()
1339 xdp_tx_arr[n - 1].xdp_frame = xdp_frame; in enetc_xdp_frame_to_xdp_tx_swbd()
1355 tx_ring = priv->xdp_tx_ring[smp_processor_id()]; in enetc_xdp_xmit()
1357 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); in enetc_xdp_xmit()
1371 tx_ring->stats.xdp_tx_drops++; in enetc_xdp_xmit()
1381 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; in enetc_xdp_xmit()
1392 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_xdp()
1396 rx_swbd->len = size; in enetc_map_rx_buff_to_xdp()
1398 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, in enetc_map_rx_buff_to_xdp()
1399 rx_ring->buffer_offset, size, false); in enetc_map_rx_buff_to_xdp()
1402 shinfo->nr_frags = 0; in enetc_map_rx_buff_to_xdp()
1410 skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags]; in enetc_add_rx_buff_to_xdp()
1413 rx_swbd->len = size; in enetc_add_rx_buff_to_xdp()
1415 skb_frag_off_set(frag, rx_swbd->page_offset); in enetc_add_rx_buff_to_xdp()
1417 __skb_frag_set_page(frag, rx_swbd->page); in enetc_add_rx_buff_to_xdp()
1419 shinfo->nr_frags++; in enetc_add_rx_buff_to_xdp()
1426 u16 size = le16_to_cpu((*rxbd)->r.buf_len); in enetc_build_xdp_buff()
1428 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); in enetc_build_xdp_buff()
1436 bd_status = le32_to_cpu((*rxbd)->r.lstatus); in enetc_build_xdp_buff()
1441 size = le16_to_cpu((*rxbd)->r.buf_len); in enetc_build_xdp_buff()
1461 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_rx_swbd_to_xdp_tx_swbd()
1465 tx_swbd->dma = rx_swbd->dma; in enetc_rx_swbd_to_xdp_tx_swbd()
1466 tx_swbd->dir = rx_swbd->dir; in enetc_rx_swbd_to_xdp_tx_swbd()
1467 tx_swbd->page = rx_swbd->page; in enetc_rx_swbd_to_xdp_tx_swbd()
1468 tx_swbd->page_offset = rx_swbd->page_offset; in enetc_rx_swbd_to_xdp_tx_swbd()
1469 tx_swbd->len = rx_swbd->len; in enetc_rx_swbd_to_xdp_tx_swbd()
1470 tx_swbd->is_dma_page = true; in enetc_rx_swbd_to_xdp_tx_swbd()
1471 tx_swbd->is_xdp_tx = true; in enetc_rx_swbd_to_xdp_tx_swbd()
1472 tx_swbd->is_eof = false; in enetc_rx_swbd_to_xdp_tx_swbd()
1476 xdp_tx_arr[n - 1].is_eof = true; in enetc_rx_swbd_to_xdp_tx_swbd()
1486 &rx_ring->rx_swbd[rx_ring_first]); in enetc_xdp_drop()
1489 rx_ring->stats.xdp_drops++; in enetc_xdp_drop()
1496 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_xdp_free()
1498 if (rx_swbd->page) { in enetc_xdp_free()
1499 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_xdp_free()
1500 rx_swbd->dir); in enetc_xdp_free()
1501 __free_page(rx_swbd->page); in enetc_xdp_free()
1502 rx_swbd->page = NULL; in enetc_xdp_free()
1506 rx_ring->stats.xdp_redirect_failures++; in enetc_xdp_free()
1515 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); in enetc_clean_rx_ring_xdp()
1523 i = rx_ring->next_to_clean; in enetc_clean_rx_ring_xdp()
1534 bd_status = le32_to_cpu(rxbd->r.lstatus); in enetc_clean_rx_ring_xdp()
1538 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); in enetc_clean_rx_ring_xdp()
1556 bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act); in enetc_clean_rx_ring_xdp()
1559 trace_xdp_exception(rx_ring->ndev, prog, xdp_act); in enetc_clean_rx_ring_xdp()
1578 tx_ring = priv->xdp_tx_ring[rx_ring->index]; in enetc_clean_rx_ring_xdp()
1585 tx_ring->stats.xdp_tx_drops++; in enetc_clean_rx_ring_xdp()
1587 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1588 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1595 * rx_swbd->page. in enetc_clean_rx_ring_xdp()
1598 rx_ring->rx_swbd[orig_i].page = NULL; in enetc_clean_rx_ring_xdp()
1612 if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) { in enetc_clean_rx_ring_xdp()
1614 rx_ring->stats.xdp_redirect_sg++; in enetc_clean_rx_ring_xdp()
1622 &rx_ring->rx_swbd[orig_i]); in enetc_clean_rx_ring_xdp()
1626 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); in enetc_clean_rx_ring_xdp()
1631 rx_ring->stats.xdp_redirect++; in enetc_clean_rx_ring_xdp()
1639 rx_ring->next_to_clean = i; in enetc_clean_rx_ring_xdp()
1641 rx_ring->stats.packets += rx_frm_cnt; in enetc_clean_rx_ring_xdp()
1642 rx_ring->stats.bytes += rx_byte_cnt; in enetc_clean_rx_ring_xdp()
1650 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) in enetc_clean_rx_ring_xdp()
1651 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - in enetc_clean_rx_ring_xdp()
1652 rx_ring->xdp.xdp_tx_in_flight); in enetc_clean_rx_ring_xdp()
1661 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_poll()
1669 for (i = 0; i < v->count_tx_rings; i++) in enetc_poll()
1670 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) in enetc_poll()
1673 prog = rx_ring->xdp.prog; in enetc_poll()
1681 v->rx_napi_work = true; in enetc_poll()
1690 if (likely(v->rx_dim_en)) in enetc_poll()
1693 v->rx_napi_work = false; in enetc_poll()
1696 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); in enetc_poll()
1698 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) in enetc_poll()
1699 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), in enetc_poll()
1711 struct enetc_hw *hw = &si->hw; in enetc_get_si_caps()
1716 si->num_rx_rings = (val >> 16) & 0xff; in enetc_get_si_caps()
1717 si->num_tx_rings = val & 0xff; in enetc_get_si_caps()
1720 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); in enetc_get_si_caps()
1721 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); in enetc_get_si_caps()
1723 si->num_rss = 0; in enetc_get_si_caps()
1729 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); in enetc_get_si_caps()
1733 si->hw_features |= ENETC_SI_F_QBV; in enetc_get_si_caps()
1736 si->hw_features |= ENETC_SI_F_PSFP; in enetc_get_si_caps()
1741 r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size, in enetc_dma_alloc_bdr()
1742 &r->bd_dma_base, GFP_KERNEL); in enetc_dma_alloc_bdr()
1743 if (!r->bd_base) in enetc_dma_alloc_bdr()
1744 return -ENOMEM; in enetc_dma_alloc_bdr()
1747 if (!IS_ALIGNED(r->bd_dma_base, 128)) { in enetc_dma_alloc_bdr()
1748 dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base, in enetc_dma_alloc_bdr()
1749 r->bd_dma_base); in enetc_dma_alloc_bdr()
1750 return -EINVAL; in enetc_dma_alloc_bdr()
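enetc_dma_alloc_bdr() above rejects a BD ring base that is not 128-byte aligned. IS_ALIGNED(x, 128) reduces to a test that the low seven address bits are zero; a tiny sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_aligned(uint64_t addr, uint64_t align)
{
	return (addr & (align - 1)) == 0;
}

int main(void)
{
	printf("%d %d\n",
	       is_aligned(0x10000080, 128),		/* 1: low 7 bits clear */
	       is_aligned(0x100000c4, 128));		/* 0: not aligned */
	return 0;
}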
1760 txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd)); in enetc_alloc_txbdr()
1761 if (!txr->tx_swbd) in enetc_alloc_txbdr()
1762 return -ENOMEM; in enetc_alloc_txbdr()
1768 txr->tso_headers = dma_alloc_coherent(txr->dev, in enetc_alloc_txbdr()
1769 txr->bd_count * TSO_HEADER_SIZE, in enetc_alloc_txbdr()
1770 &txr->tso_headers_dma, in enetc_alloc_txbdr()
1772 if (!txr->tso_headers) { in enetc_alloc_txbdr()
1773 err = -ENOMEM; in enetc_alloc_txbdr()
1777 txr->next_to_clean = 0; in enetc_alloc_txbdr()
1778 txr->next_to_use = 0; in enetc_alloc_txbdr()
1783 dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd), in enetc_alloc_txbdr()
1784 txr->bd_base, txr->bd_dma_base); in enetc_alloc_txbdr()
1785 txr->bd_base = NULL; in enetc_alloc_txbdr()
1787 vfree(txr->tx_swbd); in enetc_alloc_txbdr()
1788 txr->tx_swbd = NULL; in enetc_alloc_txbdr()
1797 for (i = 0; i < txr->bd_count; i++) in enetc_free_txbdr()
1798 enetc_free_tx_frame(txr, &txr->tx_swbd[i]); in enetc_free_txbdr()
1800 size = txr->bd_count * sizeof(union enetc_tx_bd); in enetc_free_txbdr()
1802 dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE, in enetc_free_txbdr()
1803 txr->tso_headers, txr->tso_headers_dma); in enetc_free_txbdr()
1804 txr->tso_headers = NULL; in enetc_free_txbdr()
1806 dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); in enetc_free_txbdr()
1807 txr->bd_base = NULL; in enetc_free_txbdr()
1809 vfree(txr->tx_swbd); in enetc_free_txbdr()
1810 txr->tx_swbd = NULL; in enetc_free_txbdr()
1817 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_alloc_tx_resources()
1818 err = enetc_alloc_txbdr(priv->tx_ring[i]); in enetc_alloc_tx_resources()
1827 while (i-- > 0) in enetc_alloc_tx_resources()
1828 enetc_free_txbdr(priv->tx_ring[i]); in enetc_alloc_tx_resources()
1837 for (i = 0; i < priv->num_tx_rings; i++) in enetc_free_tx_resources()
1838 enetc_free_txbdr(priv->tx_ring[i]); in enetc_free_tx_resources()
1846 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); in enetc_alloc_rxbdr()
1847 if (!rxr->rx_swbd) in enetc_alloc_rxbdr()
1848 return -ENOMEM; in enetc_alloc_rxbdr()
1855 vfree(rxr->rx_swbd); in enetc_alloc_rxbdr()
1859 rxr->next_to_clean = 0; in enetc_alloc_rxbdr()
1860 rxr->next_to_use = 0; in enetc_alloc_rxbdr()
1861 rxr->next_to_alloc = 0; in enetc_alloc_rxbdr()
1862 rxr->ext_en = extended; in enetc_alloc_rxbdr()
1871 size = rxr->bd_count * sizeof(union enetc_rx_bd); in enetc_free_rxbdr()
1873 dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base); in enetc_free_rxbdr()
1874 rxr->bd_base = NULL; in enetc_free_rxbdr()
1876 vfree(rxr->rx_swbd); in enetc_free_rxbdr()
1877 rxr->rx_swbd = NULL; in enetc_free_rxbdr()
1882 bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); in enetc_alloc_rx_resources()
1885 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_alloc_rx_resources()
1886 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended); in enetc_alloc_rx_resources()
1895 while (i-- > 0) in enetc_alloc_rx_resources()
1896 enetc_free_rxbdr(priv->rx_ring[i]); in enetc_alloc_rx_resources()
1905 for (i = 0; i < priv->num_rx_rings; i++) in enetc_free_rx_resources()
1906 enetc_free_rxbdr(priv->rx_ring[i]); in enetc_free_rx_resources()
1913 if (!tx_ring->tx_swbd) in enetc_free_tx_ring()
1916 for (i = 0; i < tx_ring->bd_count; i++) { in enetc_free_tx_ring()
1917 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; in enetc_free_tx_ring()
1922 tx_ring->next_to_clean = 0; in enetc_free_tx_ring()
1923 tx_ring->next_to_use = 0; in enetc_free_tx_ring()
1930 if (!rx_ring->rx_swbd) in enetc_free_rx_ring()
1933 for (i = 0; i < rx_ring->bd_count; i++) { in enetc_free_rx_ring()
1934 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_free_rx_ring()
1936 if (!rx_swbd->page) in enetc_free_rx_ring()
1939 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_free_rx_ring()
1940 rx_swbd->dir); in enetc_free_rx_ring()
1941 __free_page(rx_swbd->page); in enetc_free_rx_ring()
1942 rx_swbd->page = NULL; in enetc_free_rx_ring()
1945 rx_ring->next_to_clean = 0; in enetc_free_rx_ring()
1946 rx_ring->next_to_use = 0; in enetc_free_rx_ring()
1947 rx_ring->next_to_alloc = 0; in enetc_free_rx_ring()
1954 for (i = 0; i < priv->num_rx_rings; i++) in enetc_free_rxtx_rings()
1955 enetc_free_rx_ring(priv->rx_ring[i]); in enetc_free_rxtx_rings()
1957 for (i = 0; i < priv->num_tx_rings; i++) in enetc_free_rxtx_rings()
1958 enetc_free_tx_ring(priv->tx_ring[i]); in enetc_free_rxtx_rings()
1966 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); in enetc_setup_default_rss_table()
1968 return -ENOMEM; in enetc_setup_default_rss_table()
1971 for (i = 0; i < si->num_rss; i++) in enetc_setup_default_rss_table()
1974 enetc_set_rss_table(si, rss_table, si->num_rss); in enetc_setup_default_rss_table()
1983 struct enetc_si *si = priv->si; in enetc_configure_si()
1984 struct enetc_hw *hw = &si->hw; in enetc_configure_si()
1994 if (si->num_rss) { in enetc_configure_si()
1995 err = enetc_setup_default_rss_table(si, priv->num_rx_rings); in enetc_configure_si()
2005 struct enetc_si *si = priv->si; in enetc_init_si_rings_params()
2008 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE; in enetc_init_si_rings_params()
2009 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE; in enetc_init_si_rings_params()
2013 * TODO: Make # of TX rings run-time configurable in enetc_init_si_rings_params()
2015 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); in enetc_init_si_rings_params()
2016 priv->num_tx_rings = si->num_tx_rings; in enetc_init_si_rings_params()
2017 priv->bdr_int_num = cpus; in enetc_init_si_rings_params()
2018 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; in enetc_init_si_rings_params()
2019 priv->tx_ictt = ENETC_TXIC_TIMETHR; in enetc_init_si_rings_params()
2024 struct enetc_si *si = priv->si; in enetc_alloc_si_resources()
2026 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), in enetc_alloc_si_resources()
2028 if (!priv->cls_rules) in enetc_alloc_si_resources()
2029 return -ENOMEM; in enetc_alloc_si_resources()
2036 kfree(priv->cls_rules); in enetc_free_si_resources()
2041 int idx = tx_ring->index; in enetc_setup_txbdr()
2045 lower_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
2048 upper_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
2050 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_txbdr()
2052 ENETC_RTBLENR_LEN(tx_ring->bd_count)); in enetc_setup_txbdr()
2055 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); in enetc_setup_txbdr()
2056 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); in enetc_setup_txbdr()
2061 tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio); in enetc_setup_txbdr()
2062 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in enetc_setup_txbdr()
2068 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); in enetc_setup_txbdr()
2069 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); in enetc_setup_txbdr()
2070 tx_ring->idr = hw->reg + ENETC_SITXIDR; in enetc_setup_txbdr()
2075 int idx = rx_ring->index; in enetc_setup_rxbdr()
2079 lower_32_bits(rx_ring->bd_dma_base)); in enetc_setup_rxbdr()
2082 upper_32_bits(rx_ring->bd_dma_base)); in enetc_setup_rxbdr()
2084 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_rxbdr()
2086 ENETC_RTBLENR_LEN(rx_ring->bd_count)); in enetc_setup_rxbdr()
2088 if (rx_ring->xdp.prog) in enetc_setup_rxbdr()
2105 if (rx_ring->ext_en) in enetc_setup_rxbdr()
2108 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in enetc_setup_rxbdr()
2111 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); in enetc_setup_rxbdr()
2112 rx_ring->idr = hw->reg + ENETC_SIRXIDR; in enetc_setup_rxbdr()
2124 struct enetc_hw *hw = &priv->si->hw; in enetc_setup_bdrs()
2127 for (i = 0; i < priv->num_tx_rings; i++) in enetc_setup_bdrs()
2128 enetc_setup_txbdr(hw, priv->tx_ring[i]); in enetc_setup_bdrs()
2130 for (i = 0; i < priv->num_rx_rings; i++) in enetc_setup_bdrs()
2131 enetc_setup_rxbdr(hw, priv->rx_ring[i]); in enetc_setup_bdrs()
2136 int idx = rx_ring->index; in enetc_clear_rxbdr()
2138 /* disable EN bit on ring */ in enetc_clear_rxbdr()
2145 int idx = tx_ring->index; in enetc_clear_txbdr()
2147 /* disable EN bit on ring */ in enetc_clear_txbdr()
2158 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", in enetc_clear_txbdr()
2164 struct enetc_hw *hw = &priv->si->hw; in enetc_clear_bdrs()
2167 for (i = 0; i < priv->num_tx_rings; i++) in enetc_clear_bdrs()
2168 enetc_clear_txbdr(hw, priv->tx_ring[i]); in enetc_clear_bdrs()
2170 for (i = 0; i < priv->num_rx_rings; i++) in enetc_clear_bdrs()
2171 enetc_clear_rxbdr(hw, priv->rx_ring[i]); in enetc_clear_bdrs()
2178 struct pci_dev *pdev = priv->si->pdev; in enetc_setup_irqs()
2179 struct enetc_hw *hw = &priv->si->hw; in enetc_setup_irqs()
2182 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_setup_irqs()
2184 struct enetc_int_vector *v = priv->int_vector[i]; in enetc_setup_irqs()
2187 snprintf(v->name, sizeof(v->name), "%s-rxtx%d", in enetc_setup_irqs()
2188 priv->ndev->name, i); in enetc_setup_irqs()
2189 err = request_irq(irq, enetc_msix, 0, v->name, v); in enetc_setup_irqs()
2191 dev_err(priv->dev, "request_irq() failed!\n"); in enetc_setup_irqs()
2196 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); in enetc_setup_irqs()
2197 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); in enetc_setup_irqs()
2198 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1); in enetc_setup_irqs()
2202 for (j = 0; j < v->count_tx_rings; j++) { in enetc_setup_irqs()
2203 int idx = v->tx_ring[j].index; in enetc_setup_irqs()
2213 while (i--) { in enetc_setup_irqs()
2217 free_irq(irq, priv->int_vector[i]); in enetc_setup_irqs()
2225 struct pci_dev *pdev = priv->si->pdev; in enetc_free_irqs()
2228 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_free_irqs()
2232 free_irq(irq, priv->int_vector[i]); in enetc_free_irqs()
2238 struct enetc_hw *hw = &priv->si->hw; in enetc_setup_interrupts()
2243 if (priv->ic_mode & in enetc_setup_interrupts()
2246 /* init to non-0 minimum, will be adjusted later */ in enetc_setup_interrupts()
2253 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_setup_interrupts()
2259 if (priv->ic_mode & ENETC_IC_TX_MANUAL) in enetc_setup_interrupts()
2264 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_setup_interrupts()
2265 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt); in enetc_setup_interrupts()
2273 struct enetc_hw *hw = &priv->si->hw; in enetc_clear_interrupts()
2276 for (i = 0; i < priv->num_tx_rings; i++) in enetc_clear_interrupts()
2279 for (i = 0; i < priv->num_rx_rings; i++) in enetc_clear_interrupts()
2289 if (!priv->phylink) in enetc_phylink_connect()
2290 return 0; /* phy-less mode */ in enetc_phylink_connect()
2292 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); in enetc_phylink_connect()
2294 dev_err(&ndev->dev, "could not attach to PHY\n"); in enetc_phylink_connect()
2300 phylink_ethtool_set_eee(priv->phylink, &edata); in enetc_phylink_connect()
2312 netif_tx_lock(priv->ndev); in enetc_tx_onestep_tstamp()
2314 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); in enetc_tx_onestep_tstamp()
2315 skb = skb_dequeue(&priv->tx_skbs); in enetc_tx_onestep_tstamp()
2317 enetc_start_xmit(skb, priv->ndev); in enetc_tx_onestep_tstamp()
2319 netif_tx_unlock(priv->ndev); in enetc_tx_onestep_tstamp()
2324 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp); in enetc_tx_onestep_tstamp_init()
2325 skb_queue_head_init(&priv->tx_skbs); in enetc_tx_onestep_tstamp_init()
2335 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_start()
2336 int irq = pci_irq_vector(priv->si->pdev, in enetc_start()
2339 napi_enable(&priv->int_vector[i]->napi); in enetc_start()
2343 if (priv->phylink) in enetc_start()
2344 phylink_start(priv->phylink); in enetc_start()
2379 err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings); in enetc_open()
2394 if (priv->phylink) in enetc_open()
2395 phylink_disconnect_phy(priv->phylink); in enetc_open()
2409 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_stop()
2410 int irq = pci_irq_vector(priv->si->pdev, in enetc_stop()
2414 napi_synchronize(&priv->int_vector[i]->napi); in enetc_stop()
2415 napi_disable(&priv->int_vector[i]->napi); in enetc_stop()
2418 if (priv->phylink) in enetc_stop()
2419 phylink_stop(priv->phylink); in enetc_stop()
2433 if (priv->phylink) in enetc_close()
2434 phylink_disconnect_phy(priv->phylink); in enetc_close()
2447 struct enetc_hw *hw = &priv->si->hw; in enetc_setup_tc_mqprio()
2454 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in enetc_setup_tc_mqprio()
2455 num_tc = mqprio->num_tc; in enetc_setup_tc_mqprio()
2462 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_setup_tc_mqprio()
2463 tx_ring = priv->tx_ring[i]; in enetc_setup_tc_mqprio()
2464 tx_ring->prio = 0; in enetc_setup_tc_mqprio()
2465 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); in enetc_setup_tc_mqprio()
2474 priv->num_tx_rings); in enetc_setup_tc_mqprio()
2475 return -EINVAL; in enetc_setup_tc_mqprio()
2483 tx_ring = priv->tx_ring[i]; in enetc_setup_tc_mqprio()
2484 tx_ring->prio = i; in enetc_setup_tc_mqprio()
2485 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); in enetc_setup_tc_mqprio()
2515 old_prog = xchg(&priv->xdp_prog, prog); in enetc_setup_xdp_prog()
2519 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_setup_xdp_prog()
2520 struct enetc_bdr *rx_ring = priv->rx_ring[i]; in enetc_setup_xdp_prog()
2522 rx_ring->xdp.prog = prog; in enetc_setup_xdp_prog()
2525 rx_ring->buffer_offset = XDP_PACKET_HEADROOM; in enetc_setup_xdp_prog()
2527 rx_ring->buffer_offset = ENETC_RXB_PAD; in enetc_setup_xdp_prog()
2538 switch (xdp->command) { in enetc_setup_bpf()
2540 return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack); in enetc_setup_bpf()
2542 return -EINVAL; in enetc_setup_bpf()
2551 struct net_device_stats *stats = &ndev->stats; in enetc_get_stats()
2556 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_get_stats()
2557 packets += priv->rx_ring[i]->stats.packets; in enetc_get_stats()
2558 bytes += priv->rx_ring[i]->stats.bytes; in enetc_get_stats()
2561 stats->rx_packets = packets; in enetc_get_stats()
2562 stats->rx_bytes = bytes; in enetc_get_stats()
2566 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_get_stats()
2567 packets += priv->tx_ring[i]->stats.packets; in enetc_get_stats()
2568 bytes += priv->tx_ring[i]->stats.bytes; in enetc_get_stats()
2569 tx_dropped += priv->tx_ring[i]->stats.win_drop; in enetc_get_stats()
2572 stats->tx_packets = packets; in enetc_get_stats()
2573 stats->tx_bytes = bytes; in enetc_get_stats()
2574 stats->tx_dropped = tx_dropped; in enetc_get_stats()
2579 static int enetc_set_rss(struct net_device *ndev, int en) in enetc_set_rss() argument
2582 struct enetc_hw *hw = &priv->si->hw; in enetc_set_rss()
2585 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); in enetc_set_rss()
2589 reg |= (en) ? ENETC_SIMR_RSSE : 0; in enetc_set_rss()
2595 static void enetc_enable_rxvlan(struct net_device *ndev, bool en) in enetc_enable_rxvlan() argument
2598 struct enetc_hw *hw = &priv->si->hw; in enetc_enable_rxvlan()
2601 for (i = 0; i < priv->num_rx_rings; i++) in enetc_enable_rxvlan()
2602 enetc_bdr_enable_rxvlan(hw, i, en); in enetc_enable_rxvlan()
2605 static void enetc_enable_txvlan(struct net_device *ndev, bool en) in enetc_enable_txvlan() argument
2608 struct enetc_hw *hw = &priv->si->hw; in enetc_enable_txvlan()
2611 for (i = 0; i < priv->num_tx_rings; i++) in enetc_enable_txvlan()
2612 enetc_bdr_enable_txvlan(hw, i, en); in enetc_enable_txvlan()
2617 netdev_features_t changed = ndev->features ^ features; in enetc_set_features()
2638 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) in enetc_hwtstamp_set()
2639 return -EFAULT; in enetc_hwtstamp_set()
2643 priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; in enetc_hwtstamp_set()
2646 priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; in enetc_hwtstamp_set()
2647 priv->active_offloads |= ENETC_F_TX_TSTAMP; in enetc_hwtstamp_set()
2650 priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; in enetc_hwtstamp_set()
2651 priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; in enetc_hwtstamp_set()
2654 return -ERANGE; in enetc_hwtstamp_set()
2657 ao = priv->active_offloads; in enetc_hwtstamp_set()
2660 priv->active_offloads &= ~ENETC_F_RX_TSTAMP; in enetc_hwtstamp_set()
2663 priv->active_offloads |= ENETC_F_RX_TSTAMP; in enetc_hwtstamp_set()
2667 if (netif_running(ndev) && ao != priv->active_offloads) { in enetc_hwtstamp_set()
2672 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in enetc_hwtstamp_set()
2673 -EFAULT : 0; in enetc_hwtstamp_set()
2683 if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) in enetc_hwtstamp_get()
2685 else if (priv->active_offloads & ENETC_F_TX_TSTAMP) in enetc_hwtstamp_get()
2690 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? in enetc_hwtstamp_get()
2693 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in enetc_hwtstamp_get()
2694 -EFAULT : 0; in enetc_hwtstamp_get()
2708 if (!priv->phylink) in enetc_ioctl()
2709 return -EOPNOTSUPP; in enetc_ioctl()
2711 return phylink_mii_ioctl(priv->phylink, rq, cmd); in enetc_ioctl()
2716 struct pci_dev *pdev = priv->si->pdev; in enetc_alloc_msix()
2721 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; in enetc_alloc_msix()
2729 return -EPERM; in enetc_alloc_msix()
2732 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; in enetc_alloc_msix()
2734 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_alloc_msix()
2741 err = -ENOMEM; in enetc_alloc_msix()
2745 priv->int_vector[i] = v; in enetc_alloc_msix()
2747 bdr = &v->rx_ring; in enetc_alloc_msix()
2748 bdr->index = i; in enetc_alloc_msix()
2749 bdr->ndev = priv->ndev; in enetc_alloc_msix()
2750 bdr->dev = priv->dev; in enetc_alloc_msix()
2751 bdr->bd_count = priv->rx_bd_count; in enetc_alloc_msix()
2752 bdr->buffer_offset = ENETC_RXB_PAD; in enetc_alloc_msix()
2753 priv->rx_ring[i] = bdr; in enetc_alloc_msix()
2755 err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); in enetc_alloc_msix()
2761 err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, in enetc_alloc_msix()
2764 xdp_rxq_info_unreg(&bdr->xdp.rxq); in enetc_alloc_msix()
2770 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { in enetc_alloc_msix()
2771 v->rx_ictt = 0x1; in enetc_alloc_msix()
2772 v->rx_dim_en = true; in enetc_alloc_msix()
2774 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); in enetc_alloc_msix()
2775 netif_napi_add(priv->ndev, &v->napi, enetc_poll); in enetc_alloc_msix()
2776 v->count_tx_rings = v_tx_rings; in enetc_alloc_msix()
2782 idx = priv->bdr_int_num * j + i; in enetc_alloc_msix()
2783 __set_bit(idx, &v->tx_rings_map); in enetc_alloc_msix()
2784 bdr = &v->tx_ring[j]; in enetc_alloc_msix()
2785 bdr->index = idx; in enetc_alloc_msix()
2786 bdr->ndev = priv->ndev; in enetc_alloc_msix()
2787 bdr->dev = priv->dev; in enetc_alloc_msix()
2788 bdr->bd_count = priv->tx_bd_count; in enetc_alloc_msix()
2789 priv->tx_ring[idx] = bdr; in enetc_alloc_msix()
2793 first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); in enetc_alloc_msix()
2794 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; in enetc_alloc_msix()
2799 while (i--) { in enetc_alloc_msix()
2800 struct enetc_int_vector *v = priv->int_vector[i]; in enetc_alloc_msix()
2801 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_alloc_msix()
2803 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); in enetc_alloc_msix()
2804 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); in enetc_alloc_msix()
2805 netif_napi_del(&v->napi); in enetc_alloc_msix()
2806 cancel_work_sync(&v->rx_dim.work); in enetc_alloc_msix()
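enetc_alloc_msix() above spreads the TX rings across the interrupt vectors in an interleaved pattern, idx = bdr_int_num * j + i, so vector i services rings i, i + bdr_int_num, and so on. A sketch of that distribution with invented counts:

#include <stdio.h>

int main(void)
{
	int num_tx_rings = 8, bdr_int_num = 2;
	int v_tx_rings = num_tx_rings / bdr_int_num;	/* rings per vector */

	for (int i = 0; i < bdr_int_num; i++)
		for (int j = 0; j < v_tx_rings; j++)
			printf("vector %d -> tx ring %d\n",
			       i, bdr_int_num * j + i);
	return 0;
}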
2819 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_free_msix()
2820 struct enetc_int_vector *v = priv->int_vector[i]; in enetc_free_msix()
2821 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_free_msix()
2823 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); in enetc_free_msix()
2824 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); in enetc_free_msix()
2825 netif_napi_del(&v->napi); in enetc_free_msix()
2826 cancel_work_sync(&v->rx_dim.work); in enetc_free_msix()
2829 for (i = 0; i < priv->num_rx_rings; i++) in enetc_free_msix()
2830 priv->rx_ring[i] = NULL; in enetc_free_msix()
2832 for (i = 0; i < priv->num_tx_rings; i++) in enetc_free_msix()
2833 priv->tx_ring[i] = NULL; in enetc_free_msix()
2835 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_free_msix()
2836 kfree(priv->int_vector[i]); in enetc_free_msix()
2837 priv->int_vector[i] = NULL; in enetc_free_msix()
2841 pci_free_irq_vectors(priv->si->pdev); in enetc_free_msix()
2846 char *p = (char *)si - si->pad; in enetc_kfree_si()
2853 if (si->pdev->revision == ENETC_REV1) in enetc_detect_errata()
2854 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP; in enetc_detect_errata()
2867 return dev_err_probe(&pdev->dev, err, "device enable failed\n"); in enetc_pci_probe()
2870 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in enetc_pci_probe()
2872 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); in enetc_pci_probe()
2878 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); in enetc_pci_probe()
2891 alloc_size += ENETC_SI_ALIGN - 1; in enetc_pci_probe()
2895 err = -ENOMEM; in enetc_pci_probe()
2900 si->pad = (char *)si - (char *)p; in enetc_pci_probe()
2903 si->pdev = pdev; in enetc_pci_probe()
2904 hw = &si->hw; in enetc_pci_probe()
2907 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); in enetc_pci_probe()
2908 if (!hw->reg) { in enetc_pci_probe()
2909 err = -ENXIO; in enetc_pci_probe()
2910 dev_err(&pdev->dev, "ioremap() failed\n"); in enetc_pci_probe()
2914 hw->port = hw->reg + ENETC_PORT_BASE; in enetc_pci_probe()
2916 hw->global = hw->reg + ENETC_GLOBAL_BASE; in enetc_pci_probe()
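enetc_pci_probe() above over-allocates the SI structure by ENETC_SI_ALIGN - 1 bytes, aligns the pointer up, and records the pad so enetc_kfree_si() can recover the original allocation with (char *)si - si->pad. A userspace sketch of the same trick; the alignment value and the minimal struct here are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SI_ALIGN 32u	/* assumed alignment, stands in for ENETC_SI_ALIGN */

struct si { unsigned int pad; /* ... */ };

int main(void)
{
	size_t alloc_size = sizeof(struct si) + SI_ALIGN - 1;
	char *p = malloc(alloc_size);
	if (!p)
		return 1;

	/* round the raw pointer up to the next SI_ALIGN boundary */
	struct si *si = (struct si *)(((uintptr_t)p + SI_ALIGN - 1) &
				      ~(uintptr_t)(SI_ALIGN - 1));
	si->pad = (unsigned int)((char *)si - p);

	printf("pad=%u\n", si->pad);
	free((char *)si - si->pad);	/* what enetc_kfree_si() does */
	return 0;
}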
2936 struct enetc_hw *hw = &si->hw; in enetc_pci_remove()
2938 iounmap(hw->reg); in enetc_pci_remove()