Lines matching refs:pfvf: uses of pfvf as a function argument or local variable in the Marvell OcteonTX2 nic driver's TX/RX path (otx2_txrx.c). The leading number on each line is the source line in that file.
46 static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, in otx2_dma_map_skb_frag() argument
64 return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE); in otx2_dma_map_skb_frag()
67 static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) in otx2_dma_unmap_skb_frags() argument
72 otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], in otx2_dma_unmap_skb_frags()
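The two helpers above are the streaming-DMA map/unmap pair for an skb: segment 0 is the linear area, segment N > 0 is page frag N - 1, and every address handed to hardware is later released with an unmap of the same size and direction. A minimal sketch of the mapping side, using generic names rather than the driver's otx2_* wrappers:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Sketch: map one TX segment of an skb. seg 0 is the linear
     * area, seg N > 0 is page frag N - 1. Illustrative stand-in
     * for the otx2_dma_map_skb_frag() shown in the listing.
     */
    static dma_addr_t map_skb_seg(struct device *dev, struct sk_buff *skb,
                                  int seg, int *len)
    {
        struct page *page;
        int offset;

        if (seg == 0) {
            page = virt_to_page(skb->data);
            offset = offset_in_page(skb->data);
            *len = skb_headlen(skb);
        } else {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[seg - 1];

            page = skb_frag_page(frag);
            offset = skb_frag_off(frag);
            *len = skb_frag_size(frag);
        }
        return dma_map_page(dev, page, offset, *len, DMA_TO_DEVICE);
    }

The unmap side simply walks the recorded address/length pairs and calls dma_unmap_page(..., DMA_TO_DEVICE) per segment, which is what otx2_dma_unmap_skb_frags() does above.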
78 static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, in otx2_snd_pkt_handler() argument
91 if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf)) in otx2_snd_pkt_handler()
93 pfvf->netdev->name, cq->cint_idx, in otx2_snd_pkt_handler()
104 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); in otx2_snd_pkt_handler()
115 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_snd_pkt_handler()
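otx2_snd_pkt_handler() runs on TX completion: it checks the CQE status, optionally converts a hardware timestamp via otx2_ptp_tstamp2time(), unmaps the segments, and frees the skb. A hedged sketch of the unmap-and-free tail (the bookkeeping struct here only mirrors the sg_list the listing passes around, it is not a verbatim copy):

    /* Assumed shape of the per-SQE bookkeeping. */
    struct tx_sg {
        struct sk_buff *skb;
        int num_segs;
        int size[16];
        dma_addr_t dma_addr[16];
    };

    static void tx_complete_one(struct device *dev, struct tx_sg *sg)
    {
        int seg;

        if (unlikely(!sg->skb))
            return;
        for (seg = 0; seg < sg->num_segs; seg++)
            dma_unmap_page(dev, sg->dma_addr[seg], sg->size[seg],
                           DMA_TO_DEVICE);
        sg->num_segs = 0;
        /* a completion, not an error drop, so "consume" not "kfree" */
        dev_consume_skb_any(sg->skb);
        sg->skb = NULL;
    }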
120 static void otx2_set_rxtstamp(struct otx2_nic *pfvf, in otx2_set_rxtstamp() argument
126 if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) in otx2_set_rxtstamp()
130 err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns); in otx2_set_rxtstamp()
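otx2_set_rxtstamp() bails out unless OTX2_FLAG_RX_TSTAMP_ENABLED is set, then converts the raw 64-bit counter at the head of the buffer into nanoseconds. Publishing that result to the stack looks roughly like:

    /* Sketch: attach a hardware RX timestamp to an skb. 'tsns' is
     * assumed to come from a tstamp2time-style conversion.
     */
    static void set_rx_tstamp(struct sk_buff *skb, u64 tsns)
    {
        memset(skb_hwtstamps(skb), 0,
               sizeof(struct skb_shared_hwtstamps));
        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
    }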
137 static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, in otx2_skb_add_frag() argument
144 va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova)); in otx2_skb_add_frag()
153 otx2_set_rxtstamp(pfvf, skb, va); in otx2_skb_add_frag()
160 va - page_address(page) + off, len - off, pfvf->rbsize); in otx2_skb_add_frag()
162 otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, in otx2_skb_add_frag()
163 pfvf->rbsize, DMA_FROM_DEVICE); in otx2_skb_add_frag()
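otx2_skb_add_frag() translates the IOVA back to a virtual address, optionally consumes the leading timestamp, attaches the buffer as a page frag, and releases the DMA mapping. Note the iova - OTX2_HEAD_ROOM adjustment above: the original mapping covered the headroom as well. The frag-attach step in generic terms:

    /* Sketch: attach a receive buffer page to the skb and drop its
     * DMA mapping. 'rbsize' and 'headroom' mirror the listing's
     * pfvf->rbsize and OTX2_HEAD_ROOM; names are illustrative.
     */
    static void rx_attach_buf(struct device *dev, struct sk_buff *skb,
                              struct page *page, u32 off, u32 len,
                              dma_addr_t iova, u32 rbsize, u32 headroom)
    {
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len,
                        rbsize);
        dma_unmap_page(dev, iova - headroom, rbsize, DMA_FROM_DEVICE);
    }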
166 static void otx2_set_rxhash(struct otx2_nic *pfvf, in otx2_set_rxhash() argument
173 if (!(pfvf->netdev->features & NETIF_F_RXHASH)) in otx2_set_rxhash()
176 rss = &pfvf->hw.rss_info; in otx2_set_rxhash()
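otx2_set_rxhash() forwards the RSS hash computed by hardware so the stack (e.g. RPS) does not recompute it in software; the NETIF_F_RXHASH guard respects runtime ethtool toggling. The equivalent generic pattern:

    /* Sketch: report the hardware RSS hash to the stack. */
    static void set_rx_hash(struct net_device *netdev, struct sk_buff *skb,
                            u32 hw_hash)
    {
        if (netdev->features & NETIF_F_RXHASH)
            skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L4);
    }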
188 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, in otx2_free_rcv_seg() argument
202 otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL); in otx2_free_rcv_seg()
207 static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, in otx2_check_rcv_errors() argument
210 struct otx2_drv_stats *stats = &pfvf->hw.drv_stats; in otx2_check_rcv_errors()
213 if (netif_msg_rx_err(pfvf)) in otx2_check_rcv_errors()
214 netdev_err(pfvf->netdev, in otx2_check_rcv_errors()
263 if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL)) in otx2_check_rcv_errors()
268 otx2_free_rcv_seg(pfvf, cqe, qidx); in otx2_check_rcv_errors()
272 static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, in otx2_rcv_pkt_handler() argument
281 if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) in otx2_rcv_pkt_handler()
289 otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse); in otx2_rcv_pkt_handler()
292 otx2_set_rxhash(pfvf, cqe, skb); in otx2_rcv_pkt_handler()
295 if (pfvf->netdev->features & NETIF_F_RXCSUM) in otx2_rcv_pkt_handler()
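otx2_rcv_pkt_handler() ties the RX pieces together: error check, frag attach, hash, checksum, delivery. Because the hardware parser validates L3/L4 checksums, the skb can be marked CHECKSUM_UNNECESSARY when RX csum offload is enabled; delivery then goes through GRO. A sketch of that tail:

    /* Sketch: deliver a completed RX skb. CHECKSUM_UNNECESSARY is
     * only set when the netdev advertises RX checksum offload.
     */
    static void rx_deliver(struct napi_struct *napi,
                           struct net_device *netdev, struct sk_buff *skb)
    {
        skb->protocol = eth_type_trans(skb, netdev);
        if (netdev->features & NETIF_F_RXCSUM)
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        napi_gro_receive(napi, skb);
    }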
301 static int otx2_rx_napi_handler(struct otx2_nic *pfvf, in otx2_rx_napi_handler() argument
320 otx2_rcv_pkt_handler(pfvf, napi, cq, cqe); in otx2_rx_napi_handler()
328 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_rx_napi_handler()
336 bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool); in otx2_rx_napi_handler()
341 work = &pfvf->refill_wrk[cq->cq_idx]; in otx2_rx_napi_handler()
351 otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); in otx2_rx_napi_handler()
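otx2_rx_napi_handler() drains up to 'budget' CQEs, acknowledges them with one NIX_LF_CQ_OP_DOOR write, and then refills the buffer pool via otx2_aura_freeptr(), deferring to refill_wrk when allocation fails in softirq context. A skeleton of the poll-then-ack shape (the types, helpers, and the doorbell layout with queue index in the upper word and count in the lower are illustrative, not driver symbols):

    /* Sketch: RX poll loop with a single batched doorbell ack. */
    static int rx_poll(struct my_nic *nic, struct napi_struct *napi,
                       struct my_cq *cq, int budget)
    {
        int processed = 0;

        while (processed < budget) {
            void *cqe = next_cqe(cq);

            if (!cqe)
                break;
            rx_handle_one(nic, napi, cq, cqe);
            processed++;
        }
        /* one doorbell write acknowledges everything consumed */
        if (processed)
            writeq(((u64)cq->idx << 32) | processed,
                   nic->reg_base + CQ_OP_DOOR);
        return processed;
    }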
358 static int otx2_tx_napi_handler(struct otx2_nic *pfvf, in otx2_tx_napi_handler() argument
372 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx], in otx2_tx_napi_handler()
380 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_tx_napi_handler()
386 txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx); in otx2_tx_napi_handler()
391 netif_carrier_ok(pfvf->netdev)) in otx2_tx_napi_handler()
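After reaping completions, otx2_tx_napi_handler() re-wakes the matching software TX queue if it had been stopped for lack of SQEs and the carrier is still up; this pairs with the queue stop in otx2_sq_append_skb() further down. In generic form:

    /* Sketch: wake a stopped TX queue once completions freed room. */
    static void tx_maybe_wake(struct net_device *netdev, int qidx)
    {
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);

        if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev))
            netif_tx_wake_queue(txq);
    }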
403 struct otx2_nic *pfvf; in otx2_napi_handler() local
406 pfvf = (struct otx2_nic *)cq_poll->dev; in otx2_napi_handler()
407 qset = &pfvf->qset; in otx2_napi_handler()
420 workdone += otx2_rx_napi_handler(pfvf, napi, in otx2_napi_handler()
423 workdone += otx2_tx_napi_handler(pfvf, cq, budget); in otx2_napi_handler()
428 otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); in otx2_napi_handler()
432 if (pfvf->flags & OTX2_FLAG_INTF_DOWN) in otx2_napi_handler()
436 otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), in otx2_napi_handler()
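otx2_napi_handler() is the per-interrupt poll entry: it walks every CQ bound to the cint, clears the interrupt with a write to NIX_LF_CINTX_INT, and only re-enables it via NIX_LF_CINTX_ENA_W1S after napi_complete_done() succeeds and when the interface is not going down. That ordering is the canonical NAPI pattern; a sketch (my_cq_poll, my_nic, FLAG_INTF_DOWN, and CINT_ENA_W1S() are illustrative stand-ins):

    static int napi_poll(struct napi_struct *napi, int budget)
    {
        struct my_cq_poll *cq_poll = container_of(napi, struct my_cq_poll,
                                                  napi);
        struct my_nic *nic = cq_poll->nic;
        int workdone = 0;

        /* ... drain the RX and TX CQs mapped to this interrupt ... */

        if (workdone < budget && napi_complete_done(napi, workdone)) {
            /* never re-arm a cint on an interface being torn down */
            if (!(nic->flags & FLAG_INTF_DOWN))
                writeq(BIT_ULL(0),
                       nic->reg_base + CINT_ENA_W1S(cq_poll->idx));
        }
        return workdone;
    }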
460 static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_sg() argument
486 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); in otx2_sqe_add_sg()
487 if (dma_mapping_error(pfvf->dev, dma_addr)) in otx2_sqe_add_sg()
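otx2_sqe_add_sg() maps each segment in turn and must validate every address with dma_mapping_error() before writing it into an SG sub-descriptor; on failure it returns false so the caller can unwind the mappings already made. The loop skeleton, reusing the map_skb_seg() helper sketched earlier:

    /* Sketch: per-segment mapping with the mandatory error check. */
    static bool build_sg(struct device *dev, struct sk_buff *skb,
                         int num_segs)
    {
        dma_addr_t dma_addr;
        int seg, len;

        for (seg = 0; seg < num_segs; seg++) {
            dma_addr = map_skb_seg(dev, skb, seg, &len);
            if (dma_mapping_error(dev, dma_addr))
                return false;  /* caller unwinds mapped segs */
            /* ... write dma_addr/len into the SG sub-descriptor ... */
        }
        return true;
    }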
505 static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_ext() argument
519 ext->lso_format = pfvf->hw.lso_tsov4_idx; in otx2_sqe_add_ext()
528 ext->lso_format = pfvf->hw.lso_tsov6_idx; in otx2_sqe_add_ext()
547 ext->lso_format = pfvf->hw.lso_udpv4_idx; in otx2_sqe_add_ext()
550 ext->lso_format = pfvf->hw.lso_udpv6_idx; in otx2_sqe_add_ext()
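otx2_sqe_add_ext() selects one of four preconfigured hardware LSO formats from the skb's GSO type: TCPv4, TCPv6, or UDP segmentation over v4/v6. The selection logic, sketched with an assumed hw struct holding the format indices (mirroring the listing's pfvf->hw fields):

    /* Sketch: map GSO type to an LSO format index. 'struct my_hw'
     * is an assumption standing in for the driver's hw state.
     */
    static u8 pick_lso_format(const struct my_hw *hw, struct sk_buff *skb)
    {
        u32 gso = skb_shinfo(skb)->gso_type;

        if (gso & SKB_GSO_TCPV4)
            return hw->lso_tsov4_idx;
        if (gso & SKB_GSO_TCPV6)
            return hw->lso_tsov6_idx;
        /* SKB_GSO_UDP_L4: pick v4 vs v6 by ethertype */
        return skb->protocol == htons(ETH_P_IP) ?
               hw->lso_udpv4_idx : hw->lso_udpv6_idx;
    }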
577 static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_hdr() argument
624 static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf, in otx2_dma_map_tso_skb() argument
642 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); in otx2_dma_map_tso_skb()
643 if (dma_mapping_error(pfvf->dev, dma_addr)) in otx2_dma_map_tso_skb()
653 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_dma_map_tso_skb()
706 static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sq_append_tso() argument
709 struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_sq_append_tso()
721 if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) { in otx2_sq_append_tso()
738 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); in otx2_sq_append_tso()
791 static bool is_hw_tso_supported(struct otx2_nic *pfvf, in is_hw_tso_supported() argument
796 if (!pfvf->hw.hw_tso) in is_hw_tso_supported()
803 if (!is_96xx_B0(pfvf->pdev)) in is_hw_tso_supported()
814 static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb) in otx2_get_sqe_count() argument
820 if (is_hw_tso_supported(pfvf, skb)) in otx2_get_sqe_count()
827 static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, in otx2_set_txtstamp() argument
846 struct otx2_nic *pfvf = netdev_priv(netdev); in otx2_sq_append_skb() local
857 free_sqe < otx2_get_sqe_count(pfvf, skb)) in otx2_sq_append_skb()
873 if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { in otx2_sq_append_skb()
874 otx2_sq_append_tso(pfvf, sq, skb, qidx); in otx2_sq_append_skb()
883 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); in otx2_sq_append_skb()
887 otx2_sqe_add_ext(pfvf, sq, skb, &offset); in otx2_sq_append_skb()
890 if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) { in otx2_sq_append_skb()
891 otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); in otx2_sq_append_skb()
895 otx2_set_txtstamp(pfvf, skb, sq, &offset); in otx2_sq_append_skb()
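otx2_sq_append_skb() is the xmit fast path: check free SQEs against otx2_get_sqe_count() (stopping the queue if short), fall back to software TSO when is_hw_tso_supported() says hardware cannot segment the skb, then emit header, optional extension, and SG sub-descriptors, unwinding the DMA mappings if the SG step fails. A compressed sketch of that control flow (every helper here is an illustrative stand-in for the otx2_sqe_add_* and otx2_sq_append_tso calls above):

    static bool sq_append_skb(struct my_nic *nic, struct my_sq *sq,
                              struct sk_buff *skb, u16 qidx)
    {
        int offset = 0;

        if (skb_shinfo(skb)->gso_size && !hw_tso_ok(nic, skb)) {
            sw_tso_append(nic, sq, skb, qidx);  /* software fallback */
            return true;
        }
        add_hdr_sqe(nic, sq, skb, qidx, &offset);
        add_ext_sqe(nic, sq, skb, &offset);     /* LSO/tstamp if needed */
        if (!add_sg_sqes(nic, sq, skb, &offset)) {
            unmap_skb_frags(nic, &sq->sg[sq->head]);
            return false;                       /* caller frees the skb */
        }
        ring_sq_doorbell(nic, sq, qidx, offset);
        return true;
    }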
908 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) in otx2_cleanup_rx_cqes() argument
919 otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); in otx2_cleanup_rx_cqes()
923 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_cleanup_rx_cqes()
924 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); in otx2_cleanup_rx_cqes()
929 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_cleanup_rx_cqes()
933 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) in otx2_cleanup_tx_cqes() argument
941 sq = &pfvf->qset.sq[cq->cint_idx]; in otx2_cleanup_tx_cqes()
947 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_cleanup_tx_cqes()
955 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_cleanup_tx_cqes()
959 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) in otx2_rxtx_enable() argument
964 mutex_lock(&pfvf->mbox.lock); in otx2_rxtx_enable()
966 msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox); in otx2_rxtx_enable()
968 msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox); in otx2_rxtx_enable()
971 mutex_unlock(&pfvf->mbox.lock); in otx2_rxtx_enable()
975 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_rxtx_enable()
976 mutex_unlock(&pfvf->mbox.lock); in otx2_rxtx_enable()
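otx2_rxtx_enable() shows the driver's mailbox discipline: the mbox mutex is held across message allocation, otx2_sync_mbox_msg(), and the reply, with an early unlock on the allocation-failure path. Reconstructed from the lines above (the -ENOMEM value on the failure branch is an assumption):

    static int rxtx_enable(struct otx2_nic *pfvf, bool enable)
    {
        struct msg_req *msg;
        int err;

        mutex_lock(&pfvf->mbox.lock);
        if (enable)
            msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
        else
            msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
        if (!msg) {
            mutex_unlock(&pfvf->mbox.lock);
            return -ENOMEM;  /* assumed error value */
        }
        err = otx2_sync_mbox_msg(&pfvf->mbox);
        mutex_unlock(&pfvf->mbox.lock);
        return err;
    }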