Lines matching full:tx (drivers/net/ethernet/google/gve/gve_tx.c, Linux gve driver)

23 * We copy skb payloads into the registered segment before writing Tx
24 * descriptors and ringing the Tx doorbell.
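
The comment above describes the copy-mode (QPL) transmit pattern: payload bytes are copied into a preregistered segment, a descriptor pointing into that segment is written, and only then is the doorbell rung. Below is a minimal standalone userspace sketch of that ordering; the names (seg, desc_ring, ring_doorbell, xmit) are illustrative stand-ins, not the driver's API.

/* Illustrative stand-in, not driver code: copy the payload into a
 * preregistered segment, publish a descriptor, then ring the doorbell.
 */
#include <stdio.h>
#include <string.h>

#define SEG_SIZE  4096        /* stands in for the registered QPL segment */
#define RING_SIZE 8           /* power of two, like the descriptor ring */

struct desc { unsigned int offset, len; };

static unsigned char seg[SEG_SIZE];
static struct desc desc_ring[RING_SIZE];
static unsigned int seg_head, req;      /* free-running producer counters */

static void ring_doorbell(unsigned int val)
{
	/* the driver does an MMIO write here; the sketch just logs it */
	printf("doorbell <- %u\n", val);
}

static void xmit(const void *payload, unsigned int len)
{
	unsigned int off = seg_head % SEG_SIZE;     /* wrap handling elided */

	memcpy(&seg[off], payload, len);            /* 1. copy into the segment */
	desc_ring[req & (RING_SIZE - 1)] =
		(struct desc){ .offset = off, .len = len };  /* 2. descriptor */
	seg_head += len;
	req++;
	ring_doorbell(req);                         /* 3. doorbell comes last */
}

int main(void)
{
	xmit("hello", 5);
	xmit("world", 5);
	return 0;
}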
65 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
126 /* gve_tx_free_fifo - Return space to Tx FIFO
135 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
140 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_free_ring() local
146 slots = tx->mask + 1; in gve_tx_free_ring()
147 gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); in gve_tx_free_ring()
148 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_free_ring()
150 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring()
151 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring()
152 tx->q_resources = NULL; in gve_tx_free_ring()
154 if (!tx->raw_addressing) { in gve_tx_free_ring()
155 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_free_ring()
156 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_free_ring()
157 tx->tx_fifo.qpl = NULL; in gve_tx_free_ring()
160 bytes = sizeof(*tx->desc) * slots; in gve_tx_free_ring()
161 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_free_ring()
162 tx->desc = NULL; in gve_tx_free_ring()
164 vfree(tx->info); in gve_tx_free_ring()
165 tx->info = NULL; in gve_tx_free_ring()
167 netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); in gve_tx_free_ring()
172 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_alloc_ring() local
178 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring()
179 spin_lock_init(&tx->clean_lock); in gve_tx_alloc_ring()
180 tx->q_num = idx; in gve_tx_alloc_ring()
182 tx->mask = slots - 1; in gve_tx_alloc_ring()
185 tx->info = vzalloc(sizeof(*tx->info) * slots); in gve_tx_alloc_ring()
186 if (!tx->info) in gve_tx_alloc_ring()
189 /* alloc tx queue */ in gve_tx_alloc_ring()
190 bytes = sizeof(*tx->desc) * slots; in gve_tx_alloc_ring()
191 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring()
192 if (!tx->desc) in gve_tx_alloc_ring()
195 tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; in gve_tx_alloc_ring()
196 tx->dev = &priv->pdev->dev; in gve_tx_alloc_ring()
197 if (!tx->raw_addressing) { in gve_tx_alloc_ring()
198 tx->tx_fifo.qpl = gve_assign_tx_qpl(priv); in gve_tx_alloc_ring()
199 if (!tx->tx_fifo.qpl) in gve_tx_alloc_ring()
201 /* map Tx FIFO */ in gve_tx_alloc_ring()
202 if (gve_tx_fifo_init(priv, &tx->tx_fifo)) in gve_tx_alloc_ring()
206 tx->q_resources = in gve_tx_alloc_ring()
208 sizeof(*tx->q_resources), in gve_tx_alloc_ring()
209 &tx->q_resources_bus, in gve_tx_alloc_ring()
211 if (!tx->q_resources) in gve_tx_alloc_ring()
214 netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx, in gve_tx_alloc_ring()
215 (unsigned long)tx->bus); in gve_tx_alloc_ring()
216 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_alloc_ring()
222 if (!tx->raw_addressing) in gve_tx_alloc_ring()
223 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_alloc_ring()
225 if (!tx->raw_addressing) in gve_tx_alloc_ring()
226 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_alloc_ring()
228 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_alloc_ring()
229 tx->desc = NULL; in gve_tx_alloc_ring()
231 vfree(tx->info); in gve_tx_alloc_ring()
232 tx->info = NULL; in gve_tx_alloc_ring()
245 "Failed to alloc tx ring=%d: err=%d\n", in gve_tx_alloc_rings()
269 * @tx: tx ring to check
275 static inline u32 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
277 return tx->mask + 1 - (tx->req - tx->done); in gve_tx_avail()
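
gve_tx_avail() above relies on free-running unsigned counters: (req - done) is the in-flight descriptor count even after the counters wrap, so mask + 1 - (req - done) is the free space. A minimal standalone check of that arithmetic; the concrete values are made up for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same formula as gve_tx_avail(), lifted into a standalone function */
static uint32_t avail(uint32_t mask, uint32_t req, uint32_t done)
{
	return mask + 1 - (req - done);
}

int main(void)
{
	uint32_t mask = 255;                      /* a 256-entry ring */

	assert(avail(mask, 0, 0) == 256);         /* empty ring */
	assert(avail(mask, 10, 4) == 250);        /* 6 descriptors in flight */
	/* req has wrapped past UINT32_MAX, done has not: still 8 in flight */
	assert(avail(mask, 5, UINT32_MAX - 2) == 248);
	puts("ring arithmetic holds across counter wraparound");
	return 0;
}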
280 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, in gve_skb_fifo_bytes_required() argument
290 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, in gve_skb_fifo_bytes_required()
325 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
329 if (!tx->raw_addressing) in gve_can_tx()
330 can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); in gve_can_tx()
332 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); in gve_can_tx()
338 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_maybe_stop_tx() argument
346 if (!tx->raw_addressing) in gve_maybe_stop_tx()
347 bytes_required = gve_skb_fifo_bytes_required(tx, skb); in gve_maybe_stop_tx()
349 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
353 spin_lock(&tx->clean_lock); in gve_maybe_stop_tx()
354 nic_done = gve_tx_load_event_counter(priv, tx); in gve_maybe_stop_tx()
355 to_do = nic_done - tx->done; in gve_maybe_stop_tx()
357 /* Only try to clean if there is hope for TX */ in gve_maybe_stop_tx()
358 if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { in gve_maybe_stop_tx()
361 gve_clean_tx_done(priv, tx, to_do, false); in gve_maybe_stop_tx()
363 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
368 tx->stop_queue++; in gve_maybe_stop_tx()
369 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx()
371 spin_unlock(&tx->clean_lock); in gve_maybe_stop_tx()
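
The slow path above (clean under tx->clean_lock, re-check, and stop the queue only if that still is not enough) can be modeled in a few lines. A standalone sketch under made-up names; MAX_DESC_NEEDED stands in for MAX_TX_DESC_NEEDED, and the locking and event-counter read are elided.

#include <stdbool.h>
#include <stdio.h>

#define MAX_DESC_NEEDED 4   /* stands in for MAX_TX_DESC_NEEDED */

struct ring { unsigned int mask, req, done, nic_done, stop_queue; };

static unsigned int avail(const struct ring *r)
{
	return r->mask + 1 - (r->req - r->done);
}

/* returns true if the packet may be queued, false if the queue must stop */
static bool maybe_stop(struct ring *r)
{
	unsigned int to_do;

	if (avail(r) >= MAX_DESC_NEEDED)
		return true;                       /* fast path: enough room */

	/* slow path: the device may have completed work not yet reclaimed */
	to_do = r->nic_done - r->done;
	if (to_do + avail(r) >= MAX_DESC_NEEDED) {
		r->done += to_do;                  /* stands in for gve_clean_tx_done() */
		return avail(r) >= MAX_DESC_NEEDED;
	}

	r->stop_queue++;                           /* stands in for netif_tx_stop_queue() */
	return false;
}

int main(void)
{
	struct ring r = { .mask = 7, .req = 8, .done = 2, .nic_done = 6 };
	bool ok = maybe_stop(&r);

	printf("can queue: %d, stopped %u time(s)\n", ok, r.stop_queue);
	return 0;
}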
440 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) in gve_tx_add_skb_copy() argument
447 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_copy()
453 info = &tx->info[idx]; in gve_tx_add_skb_copy()
454 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_copy()
469 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); in gve_tx_add_skb_copy()
470 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, in gve_tx_add_skb_copy()
473 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, in gve_tx_add_skb_copy()
481 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb_copy()
483 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
489 next_idx = (tx->req + 1) & tx->mask; in gve_tx_add_skb_copy()
490 gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb); in gve_tx_add_skb_copy()
494 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; in gve_tx_add_skb_copy()
495 seg_desc = &tx->desc[next_idx]; in gve_tx_add_skb_copy()
502 tx->tx_fifo.base + info->iov[i].iov_offset, in gve_tx_add_skb_copy()
504 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
513 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy() argument
522 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_no_copy()
527 info = &tx->info[idx]; in gve_tx_add_skb_no_copy()
528 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
541 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
542 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
543 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
559 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
560 mtd_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
570 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
571 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
578 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
579 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
581 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
582 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
583 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
586 tx->info[idx].skb = NULL; in gve_tx_add_skb_no_copy()
587 dma_unmap_len_set(&tx->info[idx], len, len); in gve_tx_add_skb_no_copy()
588 dma_unmap_addr_set(&tx->info[idx], dma, addr); in gve_tx_add_skb_no_copy()
602 gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); in gve_tx_add_skb_no_copy()
605 tx->dropped_pkt++; in gve_tx_add_skb_no_copy()
612 struct gve_tx_ring *tx; in gve_tx() local
617 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx()
618 if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { in gve_tx()
619 /* We need to ring the txq doorbell -- we have stopped the Tx in gve_tx()
624 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
627 if (tx->raw_addressing) in gve_tx()
628 nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); in gve_tx()
630 nsegs = gve_tx_add_skb_copy(priv, tx, skb); in gve_tx()
634 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_tx()
636 tx->req += nsegs; in gve_tx()
641 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx()
647 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
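
The xmit_more check above implements doorbell batching: the MMIO doorbell write is skipped while the stack reports that more packets are coming and the queue is not stopped, so one write covers the whole burst. A standalone model of the idea; the names are illustrative and "more" stands in for netdev_xmit_more().

#include <stdbool.h>
#include <stdio.h>

static unsigned int req, doorbells;

static void ring_doorbell(unsigned int val)
{
	doorbells++;
	printf("doorbell <- %u\n", val);
}

static void tx_one(bool stopped, bool more)
{
	req++;                         /* descriptors for this packet were posted */
	if (!stopped && more)
		return;                /* defer: a later packet will ring for us */
	ring_doorbell(req);
}

int main(void)
{
	/* a burst of three packets: only the last one rings the doorbell */
	tx_one(false, true);
	tx_one(false, true);
	tx_one(false, false);
	printf("%u doorbell write(s) for 3 packets\n", doorbells);
	return 0;
}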
653 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done() argument
664 idx = tx->done & tx->mask; in gve_clean_tx_done()
667 tx->q_num, __func__, idx, tx->req, tx->done); in gve_clean_tx_done()
668 info = &tx->info[idx]; in gve_clean_tx_done()
672 if (tx->raw_addressing) in gve_clean_tx_done()
673 gve_tx_unmap_buf(tx->dev, info); in gve_clean_tx_done()
674 tx->done++; in gve_clean_tx_done()
681 if (tx->raw_addressing) in gve_clean_tx_done()
692 if (!tx->raw_addressing) in gve_clean_tx_done()
693 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_tx_done()
694 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done()
695 tx->bytes_done += bytes; in gve_clean_tx_done()
696 tx->pkt_done += pkts; in gve_clean_tx_done()
697 u64_stats_update_end(&tx->statss); in gve_clean_tx_done()
698 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); in gve_clean_tx_done()
705 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && in gve_clean_tx_done()
706 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { in gve_clean_tx_done()
707 tx->wake_queue++; in gve_clean_tx_done()
708 netif_tx_wake_queue(tx->netdev_txq); in gve_clean_tx_done()
715 struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
717 u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); in gve_tx_load_event_counter()
726 struct gve_tx_ring *tx = block->tx; in gve_tx_poll() local
734 /* In TX path, it may try to clean completed pkts in order to xmit, in gve_tx_poll()
738 spin_lock(&tx->clean_lock); in gve_tx_poll()
740 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_poll()
741 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_tx_poll()
742 gve_clean_tx_done(priv, tx, to_do, true); in gve_tx_poll()
743 spin_unlock(&tx->clean_lock); in gve_tx_poll()
745 return nic_done != tx->done; in gve_tx_poll()
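
gve_tx_poll() above reclaims at most `budget` completions per call and reports whether the device has completed more work than has been acknowledged, so the NAPI caller knows whether to poll again. A standalone model of that budgeted loop with illustrative names; locking and the event-counter read are elided.

#include <stdbool.h>
#include <stdio.h>

struct ring { unsigned int done, nic_done; };

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* one polling pass: clean up to `budget` completions,
 * return true if completions are still outstanding
 */
static bool poll_once(struct ring *r, unsigned int budget)
{
	unsigned int to_do = min_u(r->nic_done - r->done, budget);

	r->done += to_do;              /* stands in for gve_clean_tx_done() */
	return r->nic_done != r->done;
}

int main(void)
{
	struct ring r = { .done = 0, .nic_done = 100 };

	while (poll_once(&r, 32))
		printf("repoll, %u completion(s) still pending\n",
		       r.nic_done - r.done);
	printf("all %u completions reclaimed\n", r.done);
	return 0;
}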
748 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_tx_clean_pending() argument
750 u32 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_clean_pending()
752 return nic_done != tx->done; in gve_tx_clean_pending()