Lines matching +full:self +full:- in aq_ring.c (drivers/net/ethernet/aquantia/atlantic, the aQuantia/Marvell Atlantic Ethernet driver). Each entry shows the file line number, the matched source line, and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2014-2019 aQuantia Corporation
5 * Copyright (C) 2019-2020 Marvell International Ltd.
32 for (i = 0; i < sinfo->nr_frags; i++) { in aq_get_rxpages_xdp()
33 skb_frag_t *frag = &sinfo->frags[i]; in aq_get_rxpages_xdp()
38 page_ref_inc(buff->rxdata.page); in aq_get_rxpages_xdp()
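/* aq_get_rxpages_xdp(): walks the shared-info fragments of a multi-buffer
 * XDP frame, taking an extra reference on each fragment page (the increment
 * on the fragment itself sits between the matched lines) and finally on the
 * head rx page, so none of the pages are recycled by the ring while the XDP
 * frame, or the skb built from it, is still in flight. */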
43 unsigned int len = PAGE_SIZE << rxpage->order; in aq_free_rxpage()
45 dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE); in aq_free_rxpage()
48 __free_pages(rxpage->page, rxpage->order); in aq_free_rxpage()
49 rxpage->page = NULL; in aq_free_rxpage()
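/* aq_free_rxpage(): releases one rx buffer page - the DMA mapping covers
 * the whole (possibly higher-order) allocation, so the unmap length is
 * PAGE_SIZE << order before the pages themselves are freed and the pointer
 * is cleared. */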
54 struct device *dev = aq_nic_get_dev(rx_ring->aq_nic); in aq_alloc_rxpages()
55 unsigned int order = rx_ring->page_order; in aq_alloc_rxpages()
57 int ret = -ENOMEM; in aq_alloc_rxpages()
70 rxpage->page = page; in aq_alloc_rxpages()
71 rxpage->daddr = daddr; in aq_alloc_rxpages()
72 rxpage->order = order; in aq_alloc_rxpages()
73 rxpage->pg_off = rx_ring->page_offset; in aq_alloc_rxpages()
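/* aq_alloc_rxpages(): allocates a page of rx_ring->page_order and DMA-maps
 * it for the device (the allocation and mapping calls fall between the
 * matched lines), then records page, bus address, order and the ring's
 * initial page offset in the rxpage descriptor; ret stays -ENOMEM on
 * failure. */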
84 static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf) in aq_get_rxpages() argument
86 unsigned int order = self->page_order; in aq_get_rxpages()
87 u16 page_offset = self->page_offset; in aq_get_rxpages()
88 u16 frame_max = self->frame_max; in aq_get_rxpages()
89 u16 tail_size = self->tail_size; in aq_get_rxpages()
92 if (rxbuf->rxdata.page) { in aq_get_rxpages()
94 if (page_ref_count(rxbuf->rxdata.page) > 1) { in aq_get_rxpages()
96 rxbuf->rxdata.pg_off += frame_max + page_offset + in aq_get_rxpages()
98 if (rxbuf->rxdata.pg_off + frame_max + tail_size <= in aq_get_rxpages()
100 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
101 self->stats.rx.pg_flips++; in aq_get_rxpages()
102 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
108 aq_free_rxpage(&rxbuf->rxdata, in aq_get_rxpages()
109 aq_nic_get_dev(self->aq_nic)); in aq_get_rxpages()
110 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
111 self->stats.rx.pg_losts++; in aq_get_rxpages()
112 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
115 rxbuf->rxdata.pg_off = page_offset; in aq_get_rxpages()
116 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
117 self->stats.rx.pg_reuses++; in aq_get_rxpages()
118 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
122 if (!rxbuf->rxdata.page) { in aq_get_rxpages()
123 ret = aq_alloc_rxpages(&rxbuf->rxdata, self); in aq_get_rxpages()
125 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
126 self->stats.rx.alloc_fails++; in aq_get_rxpages()
127 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
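/* aq_get_rxpages(): page-recycling policy for one rx descriptor.  If the
 * buffer already has a page and someone else still holds a reference
 * (page_ref_count() > 1), the ring "flips" to the next chunk by advancing
 * pg_off by frame_max + page_offset (+ tail_size on the elided line),
 * provided another frame still fits below PAGE_SIZE << order; otherwise the
 * page is released as "lost" and reallocated.  If the ring is the sole owner
 * the page is reused from the initial offset.  Each outcome bumps its own
 * counter: pg_flips, pg_losts, pg_reuses, and alloc_fails on allocation
 * failure.  As a worked example (assuming 4 KiB pages, order 0, 2 KiB frames
 * and no head/tailroom): a page holds two frames, so at most one flip is
 * possible before the page must be reused or replaced. */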
135 static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, in aq_ring_alloc() argument
140 self->buff_ring = in aq_ring_alloc()
141 kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL); in aq_ring_alloc()
143 if (!self->buff_ring) { in aq_ring_alloc()
144 err = -ENOMEM; in aq_ring_alloc()
148 self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), in aq_ring_alloc()
149 self->size * self->dx_size, in aq_ring_alloc()
150 &self->dx_ring_pa, GFP_KERNEL); in aq_ring_alloc()
151 if (!self->dx_ring) { in aq_ring_alloc()
152 err = -ENOMEM; in aq_ring_alloc()
158 aq_ring_free(self); in aq_ring_alloc()
159 self = NULL; in aq_ring_alloc()
162 return self; in aq_ring_alloc()
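/* aq_ring_alloc(): common constructor - kcalloc() the software buff_ring
 * array and dma_alloc_coherent() the hardware descriptor area (size *
 * dx_size bytes); on failure the partially built ring is freed and NULL is
 * returned to the caller. */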
165 struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, in aq_ring_tx_alloc() argument
172 self->aq_nic = aq_nic; in aq_ring_tx_alloc()
173 self->idx = idx; in aq_ring_tx_alloc()
174 self->size = aq_nic_cfg->txds; in aq_ring_tx_alloc()
175 self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size; in aq_ring_tx_alloc()
177 self = aq_ring_alloc(self, aq_nic); in aq_ring_tx_alloc()
178 if (!self) { in aq_ring_tx_alloc()
179 err = -ENOMEM; in aq_ring_tx_alloc()
185 aq_ring_free(self); in aq_ring_tx_alloc()
186 self = NULL; in aq_ring_tx_alloc()
189 return self; in aq_ring_tx_alloc()
192 struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, in aq_ring_rx_alloc() argument
199 self->aq_nic = aq_nic; in aq_ring_rx_alloc()
200 self->idx = idx; in aq_ring_rx_alloc()
201 self->size = aq_nic_cfg->rxds; in aq_ring_rx_alloc()
202 self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; in aq_ring_rx_alloc()
203 self->xdp_prog = aq_nic->xdp_prog; in aq_ring_rx_alloc()
204 self->frame_max = AQ_CFG_RX_FRAME_MAX; in aq_ring_rx_alloc()
206 /* Only order-2 is allowed if XDP is enabled */ in aq_ring_rx_alloc()
207 if (READ_ONCE(self->xdp_prog)) { in aq_ring_rx_alloc()
208 self->page_offset = AQ_XDP_HEADROOM; in aq_ring_rx_alloc()
209 self->page_order = AQ_CFG_XDP_PAGEORDER; in aq_ring_rx_alloc()
210 self->tail_size = AQ_XDP_TAILROOM; in aq_ring_rx_alloc()
212 self->page_offset = 0; in aq_ring_rx_alloc()
213 self->page_order = fls(self->frame_max / PAGE_SIZE + in aq_ring_rx_alloc()
214 (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1; in aq_ring_rx_alloc()
215 if (aq_nic_cfg->rxpageorder > self->page_order) in aq_ring_rx_alloc()
216 self->page_order = aq_nic_cfg->rxpageorder; in aq_ring_rx_alloc()
217 self->tail_size = 0; in aq_ring_rx_alloc()
220 self = aq_ring_alloc(self, aq_nic); in aq_ring_rx_alloc()
221 if (!self) { in aq_ring_rx_alloc()
222 err = -ENOMEM; in aq_ring_rx_alloc()
228 aq_ring_free(self); in aq_ring_rx_alloc()
229 self = NULL; in aq_ring_rx_alloc()
232 return self; in aq_ring_rx_alloc()
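/* aq_ring_tx_alloc()/aq_ring_rx_alloc(): fill in the nic back-pointer, ring
 * index, descriptor count and descriptor size from the nic config, then call
 * the common aq_ring_alloc().  The rx variant also picks the page layout:
 * with an XDP program attached it reserves AQ_XDP_HEADROOM/AQ_XDP_TAILROOM
 * and uses the fixed AQ_CFG_XDP_PAGEORDER; otherwise the order is derived
 * from how many pages frame_max spans and is never taken below
 * aq_nic_cfg->rxpageorder.
 *
 * A minimal userspace sketch of that non-XDP order calculation (assumptions:
 * 4 KiB pages, an AQ_CFG_RX_FRAME_MAX of 2 KiB, and a local stand-in for the
 * kernel's fls()):
 */
#include <stdio.h>

static int fls_model(unsigned int x)		/* index of highest set bit, 1-based */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	const unsigned int page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned int frame_max = 2048;		/* assumed AQ_CFG_RX_FRAME_MAX */
	unsigned int pages = frame_max / page_size + (frame_max % page_size ? 1 : 0);
	int order = fls_model(pages) - 1;	/* same formula as aq_ring_rx_alloc() */

	printf("frame_max=%u -> page_order=%d (%u bytes per rx page)\n",
	       frame_max, order, page_size << order);
	return 0;
}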
236 aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, in aq_ring_hwts_rx_alloc() argument
242 memset(self, 0, sizeof(*self)); in aq_ring_hwts_rx_alloc()
244 self->aq_nic = aq_nic; in aq_ring_hwts_rx_alloc()
245 self->idx = idx; in aq_ring_hwts_rx_alloc()
246 self->size = size; in aq_ring_hwts_rx_alloc()
247 self->dx_size = dx_size; in aq_ring_hwts_rx_alloc()
249 self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa, in aq_ring_hwts_rx_alloc()
251 if (!self->dx_ring) { in aq_ring_hwts_rx_alloc()
252 aq_ring_free(self); in aq_ring_hwts_rx_alloc()
256 return self; in aq_ring_hwts_rx_alloc()
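/* aq_ring_hwts_rx_alloc(): dedicated constructor for the PTP hardware-
 * timestamp rx ring - the ring struct is zeroed, sized explicitly by the
 * caller, and only needs the coherent descriptor area (no buff_ring pages);
 * on allocation failure the ring is torn down again. */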
259 int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type) in aq_ring_init() argument
261 self->hw_head = 0; in aq_ring_init()
262 self->sw_head = 0; in aq_ring_init()
263 self->sw_tail = 0; in aq_ring_init()
264 self->ring_type = ring_type; in aq_ring_init()
266 if (self->ring_type == ATL_RING_RX) in aq_ring_init()
267 u64_stats_init(&self->stats.rx.syncp); in aq_ring_init()
269 u64_stats_init(&self->stats.tx.syncp); in aq_ring_init()
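/* aq_ring_init(): resets the hardware and software head/tail indices and
 * initialises the u64_stats seqcount for whichever stats block (rx or tx)
 * this ring type will update. */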
290 struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); in aq_ring_queue_wake()
293 AQ_NIC_RING2QMAP(ring->aq_nic, in aq_ring_queue_wake()
294 ring->idx))) { in aq_ring_queue_wake()
296 AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx)); in aq_ring_queue_wake()
297 u64_stats_update_begin(&ring->stats.tx.syncp); in aq_ring_queue_wake()
298 ring->stats.tx.queue_restarts++; in aq_ring_queue_wake()
299 u64_stats_update_end(&ring->stats.tx.syncp); in aq_ring_queue_wake()
305 struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); in aq_ring_queue_stop()
308 AQ_NIC_RING2QMAP(ring->aq_nic, in aq_ring_queue_stop()
309 ring->idx))) in aq_ring_queue_stop()
311 AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx)); in aq_ring_queue_stop()
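/* aq_ring_queue_wake()/aq_ring_queue_stop(): translate the ring index to its
 * netdev TX subqueue via AQ_NIC_RING2QMAP() and wake or stop that subqueue
 * (the netif_* calls themselves fall between the matched lines); waking a
 * previously stopped queue is counted in stats.tx.queue_restarts. */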
314 bool aq_ring_tx_clean(struct aq_ring_s *self) in aq_ring_tx_clean() argument
316 struct device *dev = aq_nic_get_dev(self->aq_nic); in aq_ring_tx_clean()
320 budget && self->sw_head != self->hw_head; budget--) { in aq_ring_tx_clean()
321 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in aq_ring_tx_clean()
323 if (likely(buff->is_mapped)) { in aq_ring_tx_clean()
324 if (unlikely(buff->is_sop)) { in aq_ring_tx_clean()
325 if (!buff->is_eop && in aq_ring_tx_clean()
326 buff->eop_index != 0xffffU && in aq_ring_tx_clean()
327 (!aq_ring_dx_in_range(self->sw_head, in aq_ring_tx_clean()
328 buff->eop_index, in aq_ring_tx_clean()
329 self->hw_head))) in aq_ring_tx_clean()
332 dma_unmap_single(dev, buff->pa, buff->len, in aq_ring_tx_clean()
335 dma_unmap_page(dev, buff->pa, buff->len, in aq_ring_tx_clean()
340 if (likely(!buff->is_eop)) in aq_ring_tx_clean()
343 if (buff->skb) { in aq_ring_tx_clean()
344 u64_stats_update_begin(&self->stats.tx.syncp); in aq_ring_tx_clean()
345 ++self->stats.tx.packets; in aq_ring_tx_clean()
346 self->stats.tx.bytes += buff->skb->len; in aq_ring_tx_clean()
347 u64_stats_update_end(&self->stats.tx.syncp); in aq_ring_tx_clean()
348 dev_kfree_skb_any(buff->skb); in aq_ring_tx_clean()
349 } else if (buff->xdpf) { in aq_ring_tx_clean()
350 u64_stats_update_begin(&self->stats.tx.syncp); in aq_ring_tx_clean()
351 ++self->stats.tx.packets; in aq_ring_tx_clean()
352 self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf); in aq_ring_tx_clean()
353 u64_stats_update_end(&self->stats.tx.syncp); in aq_ring_tx_clean()
354 xdp_return_frame_rx_napi(buff->xdpf); in aq_ring_tx_clean()
358 buff->skb = NULL; in aq_ring_tx_clean()
359 buff->xdpf = NULL; in aq_ring_tx_clean()
360 buff->pa = 0U; in aq_ring_tx_clean()
361 buff->eop_index = 0xffffU; in aq_ring_tx_clean()
362 self->sw_head = aq_ring_next_dx(self, self->sw_head); in aq_ring_tx_clean()
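/* aq_ring_tx_clean(): completion path for the TX ring.  Within a budget it
 * walks sw_head towards hw_head, unmapping the head (dma_unmap_single) or
 * fragment (dma_unmap_page) mapping of each descriptor.  A start-of-packet
 * descriptor whose end-of-packet index is not yet inside [sw_head, hw_head)
 * stops the loop early, so a packet is only completed once the hardware has
 * written back all of its descriptors.  On the EOP descriptor the skb is
 * freed (or the XDP frame returned) and tx packets/bytes are accounted;
 * every descriptor is then reset and sw_head advanced.
 *
 * A hedged userspace model of the aq_ring_dx_in_range() window check used
 * above (the real helper lives in aq_ring.h and is not part of this
 * listing): index i is inside the half-open window [h, t) of a ring that may
 * wrap around.
 */
#include <stdbool.h>
#include <stdio.h>

static bool dx_in_range_model(unsigned int h, unsigned int i, unsigned int t)
{
	return (t >= h) ? (i >= h && i < t) : (i >= h || i < t);
}

int main(void)
{
	/* window [30, 2): the valid region wraps past the end of the ring */
	printf("%d %d %d\n",
	       dx_in_range_model(30, 31, 2),	/* 1: before the wrap */
	       dx_in_range_model(30, 1, 2),	/* 1: after the wrap */
	       dx_in_range_model(30, 5, 2));	/* 0: outside the window */
	return 0;
}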
368 static void aq_rx_checksum(struct aq_ring_s *self, in aq_rx_checksum() argument
372 if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM)) in aq_rx_checksum()
375 if (unlikely(buff->is_cso_err)) { in aq_rx_checksum()
376 u64_stats_update_begin(&self->stats.rx.syncp); in aq_rx_checksum()
377 ++self->stats.rx.errors; in aq_rx_checksum()
378 u64_stats_update_end(&self->stats.rx.syncp); in aq_rx_checksum()
379 skb->ip_summed = CHECKSUM_NONE; in aq_rx_checksum()
382 if (buff->is_ip_cso) { in aq_rx_checksum()
385 skb->ip_summed = CHECKSUM_NONE; in aq_rx_checksum()
388 if (buff->is_udp_cso || buff->is_tcp_cso) in aq_rx_checksum()
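/* aq_rx_checksum(): only trusts the hardware checksum when the netdev has
 * NETIF_F_RXCSUM enabled.  A reported checksum error bumps stats.rx.errors
 * and leaves the skb at CHECKSUM_NONE, as does a frame whose IP header
 * checksum the hardware did not validate; for validated IP and TCP/UDP
 * checksums the elided lines appear to mark the skb's checksum as already
 * verified. */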
402 vec = cpu % aq_cfg->vecs; in aq_xdp_xmit()
403 ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)]; in aq_xdp_xmit()
412 return num_frames - drop; in aq_xdp_xmit()
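/* aq_xdp_xmit(): the driver's XDP transmit hook - maps the current CPU onto
 * one of the configured vectors (cpu % vecs) and through
 * AQ_NIC_CFG_TCVEC2RING() onto a TX ring, transmits each frame on it, and
 * returns how many of num_frames were actually queued (drop counts the ones
 * the ring refused). */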
446 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
447 ++rx_ring->stats.rx.packets; in aq_xdp_run_prog()
448 rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp); in aq_xdp_run_prog()
449 u64_stats_update_end(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
451 prog = READ_ONCE(rx_ring->xdp_prog); in aq_xdp_run_prog()
453 return aq_xdp_build_skb(xdp, aq_nic->ndev, buff); in aq_xdp_run_prog()
455 prefetchw(xdp->data_hard_start); /* xdp_frame write */ in aq_xdp_run_prog()
458 if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags) in aq_xdp_run_prog()
464 skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff); in aq_xdp_run_prog()
467 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
468 ++rx_ring->stats.rx.xdp_pass; in aq_xdp_run_prog()
469 u64_stats_update_end(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
475 tx_ring = aq_nic->aq_ring_tx[rx_ring->idx]; in aq_xdp_run_prog()
479 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
480 ++rx_ring->stats.rx.xdp_tx; in aq_xdp_run_prog()
481 u64_stats_update_end(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
485 if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0) in aq_xdp_run_prog()
488 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
489 ++rx_ring->stats.rx.xdp_redirect; in aq_xdp_run_prog()
490 u64_stats_update_end(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
497 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
498 ++rx_ring->stats.rx.xdp_aborted; in aq_xdp_run_prog()
499 u64_stats_update_end(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
500 trace_xdp_exception(aq_nic->ndev, prog, act); in aq_xdp_run_prog()
501 bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act); in aq_xdp_run_prog()
504 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
505 ++rx_ring->stats.rx.xdp_drop; in aq_xdp_run_prog()
506 u64_stats_update_end(&rx_ring->stats.rx.syncp); in aq_xdp_run_prog()
510 return ERR_PTR(-result); in aq_xdp_run_prog()
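/* aq_xdp_run_prog(): accounts rx packets/bytes for the frame, then runs the
 * attached program, if any (with no program it simply builds an skb).
 * Multi-buffer frames are rejected unless the program declared
 * xdp_has_frags.  The verdicts map to per-ring counters: xdp_pass (skb built
 * and returned), xdp_tx (frame converted and sent on the TX ring paired with
 * this rx ring index), xdp_redirect, xdp_aborted (including failures along
 * the way, with a trace point and a bpf warning) and xdp_drop.  When no skb
 * is produced the caller gets the ERR_PTR(-result) return seen above instead
 * of a packet. */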
525 if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) in aq_add_rx_fragment()
528 frag = &sinfo->frags[sinfo->nr_frags++]; in aq_add_rx_fragment()
529 buff_ = &ring->buff_ring[buff_->next]; in aq_add_rx_fragment()
531 buff_->rxdata.daddr, in aq_add_rx_fragment()
532 buff_->rxdata.pg_off, in aq_add_rx_fragment()
533 buff_->len, in aq_add_rx_fragment()
535 sinfo->xdp_frags_size += buff_->len; in aq_add_rx_fragment()
536 skb_frag_fill_page_desc(frag, buff_->rxdata.page, in aq_add_rx_fragment()
537 buff_->rxdata.pg_off, in aq_add_rx_fragment()
538 buff_->len); in aq_add_rx_fragment()
540 buff_->is_cleaned = 1; in aq_add_rx_fragment()
542 buff->is_ip_cso &= buff_->is_ip_cso; in aq_add_rx_fragment()
543 buff->is_udp_cso &= buff_->is_udp_cso; in aq_add_rx_fragment()
544 buff->is_tcp_cso &= buff_->is_tcp_cso; in aq_add_rx_fragment()
545 buff->is_cso_err |= buff_->is_cso_err; in aq_add_rx_fragment()
547 if (page_is_pfmemalloc(buff_->rxdata.page)) in aq_add_rx_fragment()
550 } while (!buff_->is_eop); in aq_add_rx_fragment()
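/* aq_add_rx_fragment(): for a frame spanning several descriptors, each
 * follow-on buffer's page chunk is synced for the CPU and appended to the
 * xdp_buff's shared info as a fragment (bounded by MAX_SKB_FRAGS), its
 * length added to xdp_frags_size, and the buffer marked cleaned.  Checksum
 * flags are and-ed/or-ed back into the head buffer so the frame is only
 * reported as validated if every part was, and pfmemalloc pages are
 * flagged. */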
557 static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, in __aq_ring_rx_clean() argument
560 struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); in __aq_ring_rx_clean()
563 for (; (self->sw_head != self->hw_head) && budget; in __aq_ring_rx_clean()
564 self->sw_head = aq_ring_next_dx(self, self->sw_head), in __aq_ring_rx_clean()
565 --budget, ++(*work_done)) { in __aq_ring_rx_clean()
566 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in __aq_ring_rx_clean()
567 bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self); in __aq_ring_rx_clean()
574 if (buff->is_cleaned) in __aq_ring_rx_clean()
577 if (!buff->is_eop) { in __aq_ring_rx_clean()
583 if (buff_->next >= self->size) { in __aq_ring_rx_clean()
584 err = -EIO; in __aq_ring_rx_clean()
589 next_ = buff_->next, in __aq_ring_rx_clean()
590 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
592 aq_ring_dx_in_range(self->sw_head, in __aq_ring_rx_clean()
594 self->hw_head); in __aq_ring_rx_clean()
602 buff->is_error |= buff_->is_error; in __aq_ring_rx_clean()
603 buff->is_cso_err |= buff_->is_cso_err; in __aq_ring_rx_clean()
605 } while (!buff_->is_eop); in __aq_ring_rx_clean()
607 if (buff->is_error || in __aq_ring_rx_clean()
608 (buff->is_lro && buff->is_cso_err)) { in __aq_ring_rx_clean()
611 if (buff_->next >= self->size) { in __aq_ring_rx_clean()
612 err = -EIO; in __aq_ring_rx_clean()
615 next_ = buff_->next, in __aq_ring_rx_clean()
616 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
618 buff_->is_cleaned = true; in __aq_ring_rx_clean()
619 } while (!buff_->is_eop); in __aq_ring_rx_clean()
621 u64_stats_update_begin(&self->stats.rx.syncp); in __aq_ring_rx_clean()
622 ++self->stats.rx.errors; in __aq_ring_rx_clean()
623 u64_stats_update_end(&self->stats.rx.syncp); in __aq_ring_rx_clean()
628 if (buff->is_error) { in __aq_ring_rx_clean()
629 u64_stats_update_begin(&self->stats.rx.syncp); in __aq_ring_rx_clean()
630 ++self->stats.rx.errors; in __aq_ring_rx_clean()
631 u64_stats_update_end(&self->stats.rx.syncp); in __aq_ring_rx_clean()
635 dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), in __aq_ring_rx_clean()
636 buff->rxdata.daddr, in __aq_ring_rx_clean()
637 buff->rxdata.pg_off, in __aq_ring_rx_clean()
638 buff->len, DMA_FROM_DEVICE); in __aq_ring_rx_clean()
642 u64_stats_update_begin(&self->stats.rx.syncp); in __aq_ring_rx_clean()
643 self->stats.rx.skb_alloc_fails++; in __aq_ring_rx_clean()
644 u64_stats_update_end(&self->stats.rx.syncp); in __aq_ring_rx_clean()
645 err = -ENOMEM; in __aq_ring_rx_clean()
649 buff->len -= in __aq_ring_rx_clean()
650 aq_ptp_extract_ts(self->aq_nic, skb, in __aq_ring_rx_clean()
651 aq_buf_vaddr(&buff->rxdata), in __aq_ring_rx_clean()
652 buff->len); in __aq_ring_rx_clean()
654 hdr_len = buff->len; in __aq_ring_rx_clean()
656 hdr_len = eth_get_headlen(skb->dev, in __aq_ring_rx_clean()
657 aq_buf_vaddr(&buff->rxdata), in __aq_ring_rx_clean()
660 memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata), in __aq_ring_rx_clean()
663 if (buff->len - hdr_len > 0) { in __aq_ring_rx_clean()
664 skb_add_rx_frag(skb, i++, buff->rxdata.page, in __aq_ring_rx_clean()
665 buff->rxdata.pg_off + hdr_len, in __aq_ring_rx_clean()
666 buff->len - hdr_len, in __aq_ring_rx_clean()
667 self->frame_max); in __aq_ring_rx_clean()
668 page_ref_inc(buff->rxdata.page); in __aq_ring_rx_clean()
671 if (!buff->is_eop) { in __aq_ring_rx_clean()
674 next_ = buff_->next; in __aq_ring_rx_clean()
675 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
677 dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), in __aq_ring_rx_clean()
678 buff_->rxdata.daddr, in __aq_ring_rx_clean()
679 buff_->rxdata.pg_off, in __aq_ring_rx_clean()
680 buff_->len, in __aq_ring_rx_clean()
683 buff_->rxdata.page, in __aq_ring_rx_clean()
684 buff_->rxdata.pg_off, in __aq_ring_rx_clean()
685 buff_->len, in __aq_ring_rx_clean()
686 self->frame_max); in __aq_ring_rx_clean()
687 page_ref_inc(buff_->rxdata.page); in __aq_ring_rx_clean()
688 buff_->is_cleaned = 1; in __aq_ring_rx_clean()
690 buff->is_ip_cso &= buff_->is_ip_cso; in __aq_ring_rx_clean()
691 buff->is_udp_cso &= buff_->is_udp_cso; in __aq_ring_rx_clean()
692 buff->is_tcp_cso &= buff_->is_tcp_cso; in __aq_ring_rx_clean()
693 buff->is_cso_err |= buff_->is_cso_err; in __aq_ring_rx_clean()
695 } while (!buff_->is_eop); in __aq_ring_rx_clean()
698 if (buff->is_vlan) in __aq_ring_rx_clean()
700 buff->vlan_rx_tag); in __aq_ring_rx_clean()
702 skb->protocol = eth_type_trans(skb, ndev); in __aq_ring_rx_clean()
704 aq_rx_checksum(self, buff, skb); in __aq_ring_rx_clean()
706 skb_set_hash(skb, buff->rss_hash, in __aq_ring_rx_clean()
707 buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : in __aq_ring_rx_clean()
712 : AQ_NIC_RING2QMAP(self->aq_nic, in __aq_ring_rx_clean()
713 self->idx)); in __aq_ring_rx_clean()
715 u64_stats_update_begin(&self->stats.rx.syncp); in __aq_ring_rx_clean()
716 ++self->stats.rx.packets; in __aq_ring_rx_clean()
717 self->stats.rx.bytes += skb->len; in __aq_ring_rx_clean()
718 u64_stats_update_end(&self->stats.rx.syncp); in __aq_ring_rx_clean()
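/* __aq_ring_rx_clean(): non-XDP receive path.  For each completed descriptor
 * between sw_head and hw_head (bounded by the NAPI budget) it first collects
 * all descriptors of a multi-buffer frame, checking that the hardware has
 * finished every one of them and bailing out with -EIO on a corrupt next
 * index.  Errored or LRO-with-checksum-error frames are marked cleaned and
 * only counted in stats.rx.errors.  Good frames get an skb whose linear part
 * is a copied header (eth_get_headlen() decides how much, PTP rings first
 * strip the appended hardware timestamp) while the remaining data is
 * attached as page fragments with an extra page reference per fragment; VLAN
 * tag, hardware checksum result, RSS hash and the receive queue mapping are
 * then applied and packets/bytes accounted before the skb is handed to the
 * stack (the GRO call itself is not among the matched lines). */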
731 int frame_sz = rx_ring->page_offset + rx_ring->frame_max + in __aq_ring_xdp_clean()
732 rx_ring->tail_size; in __aq_ring_xdp_clean()
733 struct aq_nic_s *aq_nic = rx_ring->aq_nic; in __aq_ring_xdp_clean()
739 for (; (rx_ring->sw_head != rx_ring->hw_head) && budget; in __aq_ring_xdp_clean()
740 rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head), in __aq_ring_xdp_clean()
741 --budget, ++(*work_done)) { in __aq_ring_xdp_clean()
742 struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head]; in __aq_ring_xdp_clean()
743 bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring); in __aq_ring_xdp_clean()
750 if (buff->is_cleaned) in __aq_ring_xdp_clean()
753 if (!buff->is_eop) { in __aq_ring_xdp_clean()
756 if (buff_->next >= rx_ring->size) { in __aq_ring_xdp_clean()
757 err = -EIO; in __aq_ring_xdp_clean()
760 next_ = buff_->next; in __aq_ring_xdp_clean()
761 buff_ = &rx_ring->buff_ring[next_]; in __aq_ring_xdp_clean()
763 aq_ring_dx_in_range(rx_ring->sw_head, in __aq_ring_xdp_clean()
765 rx_ring->hw_head); in __aq_ring_xdp_clean()
770 buff->is_error |= buff_->is_error; in __aq_ring_xdp_clean()
771 buff->is_cso_err |= buff_->is_cso_err; in __aq_ring_xdp_clean()
772 } while (!buff_->is_eop); in __aq_ring_xdp_clean()
778 if (buff->is_error || in __aq_ring_xdp_clean()
779 (buff->is_lro && buff->is_cso_err)) { in __aq_ring_xdp_clean()
782 if (buff_->next >= rx_ring->size) { in __aq_ring_xdp_clean()
783 err = -EIO; in __aq_ring_xdp_clean()
786 next_ = buff_->next; in __aq_ring_xdp_clean()
787 buff_ = &rx_ring->buff_ring[next_]; in __aq_ring_xdp_clean()
789 buff_->is_cleaned = true; in __aq_ring_xdp_clean()
790 } while (!buff_->is_eop); in __aq_ring_xdp_clean()
792 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in __aq_ring_xdp_clean()
793 ++rx_ring->stats.rx.errors; in __aq_ring_xdp_clean()
794 u64_stats_update_end(&rx_ring->stats.rx.syncp); in __aq_ring_xdp_clean()
799 if (buff->is_error) { in __aq_ring_xdp_clean()
800 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in __aq_ring_xdp_clean()
801 ++rx_ring->stats.rx.errors; in __aq_ring_xdp_clean()
802 u64_stats_update_end(&rx_ring->stats.rx.syncp); in __aq_ring_xdp_clean()
807 buff->rxdata.daddr, in __aq_ring_xdp_clean()
808 buff->rxdata.pg_off, in __aq_ring_xdp_clean()
809 buff->len, DMA_FROM_DEVICE); in __aq_ring_xdp_clean()
810 hard_start = page_address(buff->rxdata.page) + in __aq_ring_xdp_clean()
811 buff->rxdata.pg_off - rx_ring->page_offset; in __aq_ring_xdp_clean()
814 buff->len -= in __aq_ring_xdp_clean()
815 aq_ptp_extract_ts(rx_ring->aq_nic, skb, in __aq_ring_xdp_clean()
816 aq_buf_vaddr(&buff->rxdata), in __aq_ring_xdp_clean()
817 buff->len); in __aq_ring_xdp_clean()
819 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in __aq_ring_xdp_clean()
820 xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset, in __aq_ring_xdp_clean()
821 buff->len, false); in __aq_ring_xdp_clean()
822 if (!buff->is_eop) { in __aq_ring_xdp_clean()
824 u64_stats_update_begin(&rx_ring->stats.rx.syncp); in __aq_ring_xdp_clean()
825 ++rx_ring->stats.rx.packets; in __aq_ring_xdp_clean()
826 rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp); in __aq_ring_xdp_clean()
827 ++rx_ring->stats.rx.xdp_aborted; in __aq_ring_xdp_clean()
828 u64_stats_update_end(&rx_ring->stats.rx.syncp); in __aq_ring_xdp_clean()
837 if (buff->is_vlan) in __aq_ring_xdp_clean()
839 buff->vlan_rx_tag); in __aq_ring_xdp_clean()
843 skb_set_hash(skb, buff->rss_hash, in __aq_ring_xdp_clean()
844 buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : in __aq_ring_xdp_clean()
849 : AQ_NIC_RING2QMAP(rx_ring->aq_nic, in __aq_ring_xdp_clean()
850 rx_ring->idx)); in __aq_ring_xdp_clean()
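/* __aq_ring_xdp_clean(): XDP receive path.  The xdp_buff spans the whole rx
 * chunk (page_offset headroom + frame_max + tail_size, precomputed as
 * frame_sz) with hard_start pointing back before the headroom.  Multi-
 * descriptor frames have their tails attached as fragments; a failure there
 * is accounted as packets/bytes plus xdp_aborted and the frame is skipped.
 * The buff is then run through the XDP program and, when an skb comes back,
 * VLAN tag, RSS hash and queue mapping are applied before it is passed up
 * (the aq_xdp_run_prog() and GRO calls sit between the matched lines). */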
859 int aq_ring_rx_clean(struct aq_ring_s *self, in aq_ring_rx_clean() argument
865 return __aq_ring_xdp_clean(self, napi, work_done, budget); in aq_ring_rx_clean()
867 return __aq_ring_rx_clean(self, napi, work_done, budget); in aq_ring_rx_clean()
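/* aq_ring_rx_clean(): thin dispatcher between the XDP cleaner and the plain
 * one; the condition that selects the XDP path is elided from this
 * listing. */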
870 void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) in aq_ring_hwts_rx_clean() argument
873 while (self->sw_head != self->hw_head) { in aq_ring_hwts_rx_clean()
876 aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw, in aq_ring_hwts_rx_clean()
877 self->dx_ring + in aq_ring_hwts_rx_clean()
878 (self->sw_head * self->dx_size), in aq_ring_hwts_rx_clean()
879 self->dx_size, &ns); in aq_ring_hwts_rx_clean()
882 self->sw_head = aq_ring_next_dx(self, self->sw_head); in aq_ring_hwts_rx_clean()
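/* aq_ring_hwts_rx_clean(): drains the hardware-timestamp ring, letting the
 * hw ops parse each descriptor into a nanosecond value (which the elided
 * lines presumably hand to the PTP code) before advancing sw_head. */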
887 int aq_ring_rx_fill(struct aq_ring_s *self) in aq_ring_rx_fill() argument
893 if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES, in aq_ring_rx_fill()
894 self->size / 2)) in aq_ring_rx_fill()
897 for (i = aq_ring_avail_dx(self); i--; in aq_ring_rx_fill()
898 self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { in aq_ring_rx_fill()
899 buff = &self->buff_ring[self->sw_tail]; in aq_ring_rx_fill()
901 buff->flags = 0U; in aq_ring_rx_fill()
902 buff->len = self->frame_max; in aq_ring_rx_fill()
904 err = aq_get_rxpages(self, buff); in aq_ring_rx_fill()
908 buff->pa = aq_buf_daddr(&buff->rxdata); in aq_ring_rx_fill()
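/* aq_ring_rx_fill(): refills rx descriptors only once at least
 * AQ_CFG_RX_REFILL_THRES slots (capped at half the ring) are free, so the
 * ring is not topped up one descriptor at a time.  Each refilled descriptor
 * is reset to frame_max length, given a page chunk via aq_get_rxpages(), and
 * has its DMA address published in buff->pa. */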
916 void aq_ring_rx_deinit(struct aq_ring_s *self) in aq_ring_rx_deinit() argument
918 if (!self) in aq_ring_rx_deinit()
921 for (; self->sw_head != self->sw_tail; in aq_ring_rx_deinit()
922 self->sw_head = aq_ring_next_dx(self, self->sw_head)) { in aq_ring_rx_deinit()
923 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in aq_ring_rx_deinit()
925 aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic)); in aq_ring_rx_deinit()
929 void aq_ring_free(struct aq_ring_s *self) in aq_ring_free() argument
931 if (!self) in aq_ring_free()
934 kfree(self->buff_ring); in aq_ring_free()
936 if (self->dx_ring) in aq_ring_free()
937 dma_free_coherent(aq_nic_get_dev(self->aq_nic), in aq_ring_free()
938 self->size * self->dx_size, self->dx_ring, in aq_ring_free()
939 self->dx_ring_pa); in aq_ring_free()
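/* aq_ring_rx_deinit()/aq_ring_free(): teardown - every buffer still owned by
 * software between sw_head and sw_tail has its rx page unmapped and freed,
 * then the buff_ring array and, if present, the coherent descriptor memory
 * are released. */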
942 unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) in aq_ring_fill_stats_data() argument
947 if (self->ring_type == ATL_RING_RX) { in aq_ring_fill_stats_data()
951 start = u64_stats_fetch_begin(&self->stats.rx.syncp); in aq_ring_fill_stats_data()
952 data[count] = self->stats.rx.packets; in aq_ring_fill_stats_data()
953 data[++count] = self->stats.rx.jumbo_packets; in aq_ring_fill_stats_data()
954 data[++count] = self->stats.rx.lro_packets; in aq_ring_fill_stats_data()
955 data[++count] = self->stats.rx.errors; in aq_ring_fill_stats_data()
956 data[++count] = self->stats.rx.alloc_fails; in aq_ring_fill_stats_data()
957 data[++count] = self->stats.rx.skb_alloc_fails; in aq_ring_fill_stats_data()
958 data[++count] = self->stats.rx.polls; in aq_ring_fill_stats_data()
959 data[++count] = self->stats.rx.pg_flips; in aq_ring_fill_stats_data()
960 data[++count] = self->stats.rx.pg_reuses; in aq_ring_fill_stats_data()
961 data[++count] = self->stats.rx.pg_losts; in aq_ring_fill_stats_data()
962 data[++count] = self->stats.rx.xdp_aborted; in aq_ring_fill_stats_data()
963 data[++count] = self->stats.rx.xdp_drop; in aq_ring_fill_stats_data()
964 data[++count] = self->stats.rx.xdp_pass; in aq_ring_fill_stats_data()
965 data[++count] = self->stats.rx.xdp_tx; in aq_ring_fill_stats_data()
966 data[++count] = self->stats.rx.xdp_invalid; in aq_ring_fill_stats_data()
967 data[++count] = self->stats.rx.xdp_redirect; in aq_ring_fill_stats_data()
968 } while (u64_stats_fetch_retry(&self->stats.rx.syncp, start)); in aq_ring_fill_stats_data()
973 start = u64_stats_fetch_begin(&self->stats.tx.syncp); in aq_ring_fill_stats_data()
974 data[count] = self->stats.tx.packets; in aq_ring_fill_stats_data()
975 data[++count] = self->stats.tx.queue_restarts; in aq_ring_fill_stats_data()
976 } while (u64_stats_fetch_retry(&self->stats.tx.syncp, start)); in aq_ring_fill_stats_data()
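/* aq_ring_fill_stats_data(): copies the per-ring counters into a flat u64
 * array for ethtool, choosing the rx or tx layout by ring type.  The
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() loop re-reads the block if
 * a concurrent update ran in between, which is what makes these 64-bit
 * counters safe to read on 32-bit SMP systems without extra locking. */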