Lines matching refs: tx
26 struct gve_tx_ring *tx = &priv->tx[tx_qid]; in gve_xdp_tx_flush() local
28 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_tx_flush()
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_xdp_done() argument
161 u32 clean_end = tx->done + to_do; in gve_clean_xdp_done()
167 for (; tx->done < clean_end; tx->done++) { in gve_clean_xdp_done()
168 idx = tx->done & tx->mask; in gve_clean_xdp_done()
169 info = &tx->info[idx]; in gve_clean_xdp_done()
186 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_xdp_done()
187 if (xsk_complete > 0 && tx->xsk_pool) in gve_clean_xdp_done()
188 xsk_tx_completed(tx->xsk_pool, xsk_complete); in gve_clean_xdp_done()
189 u64_stats_update_begin(&tx->statss); in gve_clean_xdp_done()
190 tx->bytes_done += bytes; in gve_clean_xdp_done()
191 tx->pkt_done += pkts; in gve_clean_xdp_done()
192 u64_stats_update_end(&tx->statss); in gve_clean_xdp_done()
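
Note: the gve_clean_xdp_done() lines above advance tx->done toward a precomputed clean_end, masking the free-running counter into the ring to find each slot's metadata before updating the byte and packet stats. A minimal standalone model of that walk, assuming an illustrative struct ring, clean_done() and slot_bytes[] of my own (not the driver's types):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative ring: free-running 32-bit counters, power-of-two slot count. */
    struct ring {
        uint32_t req;    /* descriptors posted by the producer */
        uint32_t done;   /* descriptors reclaimed so far by the cleaner */
        uint32_t mask;   /* slots - 1, where slots is a power of two */
    };

    /* Reclaim to_do completed slots, mirroring the done/clean_end/mask walk above.
     * slot_bytes[] stands in for the per-slot metadata kept in tx->info[]. */
    static uint32_t clean_done(struct ring *r, uint32_t *slot_bytes, uint32_t to_do)
    {
        uint32_t clean_end = r->done + to_do;
        uint32_t freed = 0;

        for (; r->done < clean_end; r->done++) {
            uint32_t idx = r->done & r->mask;   /* counter -> slot index */
            freed += slot_bytes[idx];
            slot_bytes[idx] = 0;
        }
        return freed;
    }

    int main(void)
    {
        struct ring r = { .req = 5, .done = 0, .mask = 7 };
        uint32_t bytes[8] = { 100, 200, 300, 400, 500 };

        printf("freed %u bytes\n", clean_done(&r, bytes, 5));  /* freed 1500 bytes */
        return 0;
    }
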
196 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
201 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_free_ring() local
207 slots = tx->mask + 1; in gve_tx_free_ring()
208 if (tx->q_num < priv->tx_cfg.num_queues) { in gve_tx_free_ring()
209 gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); in gve_tx_free_ring()
210 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_free_ring()
212 gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt); in gve_tx_free_ring()
215 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring()
216 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring()
217 tx->q_resources = NULL; in gve_tx_free_ring()
219 if (!tx->raw_addressing) { in gve_tx_free_ring()
220 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_free_ring()
221 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_free_ring()
222 tx->tx_fifo.qpl = NULL; in gve_tx_free_ring()
225 bytes = sizeof(*tx->desc) * slots; in gve_tx_free_ring()
226 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_free_ring()
227 tx->desc = NULL; in gve_tx_free_ring()
229 vfree(tx->info); in gve_tx_free_ring()
230 tx->info = NULL; in gve_tx_free_ring()
237 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_alloc_ring() local
243 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring()
244 spin_lock_init(&tx->clean_lock); in gve_tx_alloc_ring()
245 spin_lock_init(&tx->xdp_lock); in gve_tx_alloc_ring()
246 tx->q_num = idx; in gve_tx_alloc_ring()
248 tx->mask = slots - 1; in gve_tx_alloc_ring()
251 tx->info = vcalloc(slots, sizeof(*tx->info)); in gve_tx_alloc_ring()
252 if (!tx->info) in gve_tx_alloc_ring()
256 bytes = sizeof(*tx->desc) * slots; in gve_tx_alloc_ring()
257 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring()
258 if (!tx->desc) in gve_tx_alloc_ring()
261 tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; in gve_tx_alloc_ring()
262 tx->dev = &priv->pdev->dev; in gve_tx_alloc_ring()
263 if (!tx->raw_addressing) { in gve_tx_alloc_ring()
264 tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx); in gve_tx_alloc_ring()
265 if (!tx->tx_fifo.qpl) in gve_tx_alloc_ring()
268 if (gve_tx_fifo_init(priv, &tx->tx_fifo)) in gve_tx_alloc_ring()
272 tx->q_resources = in gve_tx_alloc_ring()
274 sizeof(*tx->q_resources), in gve_tx_alloc_ring()
275 &tx->q_resources_bus, in gve_tx_alloc_ring()
277 if (!tx->q_resources) in gve_tx_alloc_ring()
281 (unsigned long)tx->bus); in gve_tx_alloc_ring()
283 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_alloc_ring()
289 if (!tx->raw_addressing) in gve_tx_alloc_ring()
290 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_alloc_ring()
292 if (!tx->raw_addressing) in gve_tx_alloc_ring()
293 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_alloc_ring()
295 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_alloc_ring()
296 tx->desc = NULL; in gve_tx_alloc_ring()
298 vfree(tx->info); in gve_tx_alloc_ring()
299 tx->info = NULL; in gve_tx_alloc_ring()
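
Note: gve_tx_alloc_ring() zeroes the ring, derives mask from a power-of-two slot count, then allocates the host-side info array and the device-visible descriptor array, unwinding in reverse order on failure. A hedged standalone sketch of that shape, with calloc()/free() standing in for vcalloc()/vfree() and dma_alloc_coherent()/dma_free_coherent(), and with struct and function names of my own choosing:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct slot_info { void *skb; };        /* stand-in for the per-slot bookkeeping */
    struct desc      { uint64_t opaque; };  /* stand-in for the device descriptor */

    struct txring {
        uint32_t req, done, mask;
        struct slot_info *info;   /* host-side state, one entry per slot */
        struct desc *descs;       /* descriptor array the NIC reads */
    };

    /* slots must be a power of two so that (counter & mask) indexes the ring. */
    static int ring_alloc(struct txring *r, uint32_t slots)
    {
        memset(r, 0, sizeof(*r));
        r->mask = slots - 1;

        r->info = calloc(slots, sizeof(*r->info));    /* vcalloc() in the driver */
        if (!r->info)
            return -1;

        r->descs = calloc(slots, sizeof(*r->descs));  /* dma_alloc_coherent() there */
        if (!r->descs) {
            free(r->info);                            /* unwind in reverse order */
            r->info = NULL;
            return -1;
        }
        return 0;
    }

    static void ring_free(struct txring *r)
    {
        free(r->descs);
        free(r->info);
        memset(r, 0, sizeof(*r));
    }

    int main(void)
    {
        struct txring r;

        if (ring_alloc(&r, 512) == 0)   /* 512 slots -> mask 0x1ff */
            ring_free(&r);
        return 0;
    }
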
342 static inline u32 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
344 return tx->mask + 1 - (tx->req - tx->done); in gve_tx_avail()
347 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, in gve_skb_fifo_bytes_required() argument
357 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, in gve_skb_fifo_bytes_required()
392 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
396 if (!tx->raw_addressing) in gve_can_tx()
397 can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); in gve_can_tx()
399 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); in gve_can_tx()
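
Note: gve_tx_avail() computes free slots as mask + 1 - (req - done); because req and done are free-running unsigned counters, the subtraction stays correct across 32-bit wraparound. gve_can_tx() additionally requires FIFO space when the ring is not in raw-addressing mode. A small runnable check of that arithmetic (struct ring, ring_avail(), ring_can_tx() and MAX_DESC_NEEDED are illustrative names, not the driver's):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_DESC_NEEDED 4   /* placeholder for the driver's MAX_TX_DESC_NEEDED */

    struct ring { uint32_t req, done, mask; };

    /* Free slots: ring size minus descriptors posted but not yet cleaned. */
    static uint32_t ring_avail(const struct ring *r)
    {
        return r->mask + 1 - (r->req - r->done);
    }

    /* Transmit is possible when enough descriptors and, on the copy path,
     * enough FIFO bytes are available. */
    static bool ring_can_tx(const struct ring *r, bool raw_addressing,
                            uint32_t fifo_free, uint32_t bytes_required)
    {
        bool can_alloc = true;

        if (!raw_addressing)
            can_alloc = fifo_free >= bytes_required;
        return ring_avail(r) >= MAX_DESC_NEEDED && can_alloc;
    }

    int main(void)
    {
        /* Counters that have wrapped: req just past zero, done just below UINT32_MAX. */
        struct ring r = { .req = 3, .done = 0xfffffffeu, .mask = 1023 };

        assert(r.req - r.done == 5);          /* 5 descriptors outstanding */
        assert(ring_avail(&r) == 1024 - 5);   /* wraparound-safe */
        assert(ring_can_tx(&r, true, 0, 0));  /* raw addressing: no FIFO check */
        return 0;
    }
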
405 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_maybe_stop_tx() argument
413 if (!tx->raw_addressing) in gve_maybe_stop_tx()
414 bytes_required = gve_skb_fifo_bytes_required(tx, skb); in gve_maybe_stop_tx()
416 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
420 spin_lock(&tx->clean_lock); in gve_maybe_stop_tx()
421 nic_done = gve_tx_load_event_counter(priv, tx); in gve_maybe_stop_tx()
422 to_do = nic_done - tx->done; in gve_maybe_stop_tx()
425 if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { in gve_maybe_stop_tx()
428 gve_clean_tx_done(priv, tx, to_do, false); in gve_maybe_stop_tx()
430 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
435 tx->stop_queue++; in gve_maybe_stop_tx()
436 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx()
438 spin_unlock(&tx->clean_lock); in gve_maybe_stop_tx()
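
Note: gve_maybe_stop_tx() tries the cheap availability check first; only if that fails does it take tx->clean_lock, read the NIC's event counter, reclaim whatever has completed, and stop the queue if there is still not enough room. A single-threaded sketch of that decision, reusing the illustrative struct ring and ring_avail() from the sketch after the gve_can_tx() lines (the real path also holds clean_lock, counts stop_queue, and accounts FIFO bytes):

    /* Returns 0 when the caller may post the packet, -1 when the queue should be
     * stopped. nic_done stands in for gve_tx_load_event_counter(); advancing
     * r->done stands in for a full gve_clean_tx_done() pass. */
    static int maybe_stop(struct ring *r, uint32_t nic_done, uint32_t desc_needed)
    {
        if (ring_avail(r) >= desc_needed)          /* fast path: room already */
            return 0;

        /* Slow path: count completions the NIC has reported but we have not
         * yet reclaimed, and recheck as if they were cleaned. */
        uint32_t to_do = nic_done - r->done;
        if (to_do + ring_avail(r) >= desc_needed) {
            r->done += to_do;                      /* reclaim, then recheck */
            if (ring_avail(r) >= desc_needed)
                return 0;
        }
        return -1;                                 /* stop; a later clean rewakes it */
    }
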
508 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) in gve_tx_add_skb_copy() argument
515 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_copy()
521 info = &tx->info[idx]; in gve_tx_add_skb_copy()
522 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_copy()
535 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); in gve_tx_add_skb_copy()
536 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, in gve_tx_add_skb_copy()
539 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, in gve_tx_add_skb_copy()
548 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb_copy()
550 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
556 next_idx = (tx->req + 1) & tx->mask; in gve_tx_add_skb_copy()
557 gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb); in gve_tx_add_skb_copy()
561 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; in gve_tx_add_skb_copy()
562 seg_desc = &tx->desc[next_idx]; in gve_tx_add_skb_copy()
571 tx->tx_fifo.base + info->iov[i].iov_offset, in gve_tx_add_skb_copy()
573 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
582 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy() argument
591 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_no_copy()
596 info = &tx->info[idx]; in gve_tx_add_skb_no_copy()
597 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
610 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
611 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
612 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
629 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
630 mtd_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
640 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
641 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
650 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
651 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
653 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
654 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
655 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
658 tx->info[idx].skb = NULL; in gve_tx_add_skb_no_copy()
659 dma_unmap_len_set(&tx->info[idx], len, len); in gve_tx_add_skb_no_copy()
660 dma_unmap_addr_set(&tx->info[idx], dma, addr); in gve_tx_add_skb_no_copy()
676 gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); in gve_tx_add_skb_no_copy()
679 tx->dropped_pkt++; in gve_tx_add_skb_no_copy()
686 struct gve_tx_ring *tx; in gve_tx() local
691 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx()
692 if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { in gve_tx()
698 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
701 if (tx->raw_addressing) in gve_tx()
702 nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); in gve_tx()
704 nsegs = gve_tx_add_skb_copy(priv, tx, skb); in gve_tx()
708 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_tx()
710 tx->req += nsegs; in gve_tx()
715 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx()
721 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
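
Note: gve_tx() defers the doorbell write while netdev_xmit_more() says more packets are queued behind this one and the queue is still running, so a burst of skbs costs a single MMIO write; it also rings immediately on the stop path above so already-posted descriptors are not left behind. The batching pattern, again on the illustrative struct ring (write_doorbell() is a placeholder for gve_tx_put_doorbell()):

    /* Post one packet's descriptors; ring the doorbell only at the end of a burst. */
    static void xmit_one(struct ring *r, uint32_t nsegs, bool more_coming,
                         void (*write_doorbell)(uint32_t req))
    {
        r->req += nsegs;            /* descriptors were filled in before this point */
        if (more_coming)
            return;                 /* a later packet in the burst will ring */
        write_doorbell(r->req);     /* tell the NIC how far req has advanced */
    }
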
725 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_fill_xdp() argument
730 u32 reqi = tx->req; in gve_tx_fill_xdp()
732 pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len); in gve_tx_fill_xdp()
735 info = &tx->info[reqi & tx->mask]; in gve_tx_fill_xdp()
740 nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len, in gve_tx_fill_xdp()
748 gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0, in gve_tx_fill_xdp()
753 gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask], in gve_tx_fill_xdp()
758 memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset, in gve_tx_fill_xdp()
761 tx->tx_fifo.qpl->page_buses, in gve_tx_fill_xdp()
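
Note: gve_tx_fill_xdp() copies the frame into the queue-page-list FIFO; because the FIFO is circular, one allocation can come back as up to two fragments, so both the memcpy and the descriptor fill loop over info->iov[]. A standalone model of that wraparound split (fifo_alloc(), fifo_copy() and struct iov are illustrative; the real allocator also tracks free space and the pad bytes seen above):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct iov { size_t off, len; };

    /* Allocate len bytes from a circular FIFO of size fifo_size starting at *head,
     * returning 1 or 2 fragments; a second fragment appears only on wraparound. */
    static int fifo_alloc(size_t *head, size_t fifo_size, size_t len, struct iov iov[2])
    {
        size_t first = fifo_size - *head;   /* bytes until the end of the buffer */

        if (len <= first) {
            iov[0] = (struct iov){ .off = *head, .len = len };
            *head = (*head + len) % fifo_size;
            return 1;
        }
        iov[0] = (struct iov){ .off = *head, .len = first };
        iov[1] = (struct iov){ .off = 0, .len = len - first };
        *head = len - first;
        return 2;
    }

    /* Copy data into the FIFO backing store, one memcpy per fragment, the way
     * the driver loops over info->iov[] before filling descriptors. */
    static void fifo_copy(uint8_t *base, const struct iov *iov, int nfrags,
                          const uint8_t *data)
    {
        for (int i = 0; i < nfrags; i++) {
            memcpy(base + iov[i].off, data, iov[i].len);
            data += iov[i].len;
        }
    }

    int main(void)
    {
        uint8_t fifo[1024];
        uint8_t frame[200] = { 0 };
        struct iov iov[2];
        size_t head = 900;                        /* near the end: forces a split */

        int n = fifo_alloc(&head, sizeof(fifo), sizeof(frame), iov);
        fifo_copy(fifo, iov, n, frame);           /* n == 2: 124 bytes + 76 bytes */
        return 0;
    }
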
776 struct gve_tx_ring *tx; in gve_xdp_xmit() local
785 tx = &priv->tx[qid]; in gve_xdp_xmit()
787 spin_lock(&tx->xdp_lock); in gve_xdp_xmit()
789 err = gve_xdp_xmit_one(priv, tx, frames[i]->data, in gve_xdp_xmit()
796 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_xmit()
798 spin_unlock(&tx->xdp_lock); in gve_xdp_xmit()
800 u64_stats_update_begin(&tx->statss); in gve_xdp_xmit()
801 tx->xdp_xmit += n; in gve_xdp_xmit()
802 tx->xdp_xmit_errors += n - i; in gve_xdp_xmit()
803 u64_stats_update_end(&tx->statss); in gve_xdp_xmit()
808 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xdp_xmit_one() argument
813 if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1)) in gve_xdp_xmit_one()
816 nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false); in gve_xdp_xmit_one()
817 tx->req += nsegs; in gve_xdp_xmit_one()
824 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done() argument
835 idx = tx->done & tx->mask; in gve_clean_tx_done()
838 tx->q_num, __func__, idx, tx->req, tx->done); in gve_clean_tx_done()
839 info = &tx->info[idx]; in gve_clean_tx_done()
843 if (tx->raw_addressing) in gve_clean_tx_done()
844 gve_tx_unmap_buf(tx->dev, info); in gve_clean_tx_done()
845 tx->done++; in gve_clean_tx_done()
852 if (tx->raw_addressing) in gve_clean_tx_done()
858 if (!tx->raw_addressing) in gve_clean_tx_done()
859 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_tx_done()
860 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done()
861 tx->bytes_done += bytes; in gve_clean_tx_done()
862 tx->pkt_done += pkts; in gve_clean_tx_done()
863 u64_stats_update_end(&tx->statss); in gve_clean_tx_done()
864 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); in gve_clean_tx_done()
871 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && in gve_clean_tx_done()
872 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { in gve_clean_tx_done()
873 tx->wake_queue++; in gve_clean_tx_done()
874 netif_tx_wake_queue(tx->netdev_txq); in gve_clean_tx_done()
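
Note: gve_clean_tx_done() accumulates bytes and packets under the u64 stats seqcount, reports them with netdev_tx_completed_queue(), and rewakes a stopped queue once gve_can_tx(tx, GVE_TX_START_THRESH) holds again. A simplified wake check, reusing ring_avail() from the earlier sketch and reducing the condition to descriptor space only (the driver's condition also requires FIFO bytes on the copy path and runs under clean_lock):

    /* After cleaning, rewake a stopped queue only if a worst-case packet now fits.
     * wake_queue() is a placeholder for netif_tx_wake_queue(). */
    static void maybe_wake(struct ring *r, bool queue_stopped, uint32_t min_free_slots,
                           void (*wake_queue)(void))
    {
        if (queue_stopped && ring_avail(r) >= min_free_slots)
            wake_queue();
    }
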
881 struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
883 u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); in gve_tx_load_event_counter()
889 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xsk_tx() argument
896 spin_lock(&tx->xdp_lock); in gve_xsk_tx()
898 if (!gve_can_tx(tx, GVE_TX_START_THRESH)) in gve_xsk_tx()
901 if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) { in gve_xsk_tx()
902 tx->xdp_xsk_done = tx->xdp_xsk_wakeup; in gve_xsk_tx()
906 data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); in gve_xsk_tx()
907 nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true); in gve_xsk_tx()
908 tx->req += nsegs; in gve_xsk_tx()
913 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xsk_tx()
914 xsk_tx_release(tx->xsk_pool); in gve_xsk_tx()
916 spin_unlock(&tx->xdp_lock); in gve_xsk_tx()
923 struct gve_tx_ring *tx = block->tx; in gve_xdp_poll() local
933 nic_done = gve_tx_load_event_counter(priv, tx); in gve_xdp_poll()
934 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_xdp_poll()
935 gve_clean_xdp_done(priv, tx, to_do); in gve_xdp_poll()
936 repoll = nic_done != tx->done; in gve_xdp_poll()
938 if (tx->xsk_pool) { in gve_xdp_poll()
939 int sent = gve_xsk_tx(priv, tx, budget); in gve_xdp_poll()
941 u64_stats_update_begin(&tx->statss); in gve_xdp_poll()
942 tx->xdp_xsk_sent += sent; in gve_xdp_poll()
943 u64_stats_update_end(&tx->statss); in gve_xdp_poll()
945 if (xsk_uses_need_wakeup(tx->xsk_pool)) in gve_xdp_poll()
946 xsk_set_tx_need_wakeup(tx->xsk_pool); in gve_xdp_poll()
956 struct gve_tx_ring *tx = block->tx; in gve_tx_poll() local
968 spin_lock(&tx->clean_lock); in gve_tx_poll()
970 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_poll()
971 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_tx_poll()
972 gve_clean_tx_done(priv, tx, to_do, true); in gve_tx_poll()
973 spin_unlock(&tx->clean_lock); in gve_tx_poll()
975 return nic_done != tx->done; in gve_tx_poll()
978 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_tx_clean_pending() argument
980 u32 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_clean_pending()
982 return nic_done != tx->done; in gve_tx_clean_pending()