Lines Matching full:tx (search hits in the gve driver's gve_tx.c)
23 * We copy skb payloads into the registered segment before writing Tx
24 * descriptors and ringing the Tx doorbell.
65 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
126 /* gve_tx_free_fifo - Return space to Tx FIFO
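The two FIFO helpers above manage the registered segment (the queue page list) that copy-mode Tx stages packet bytes in. As a rough, hedged illustration of that allocate/free bookkeeping, here is a minimal user-space sketch of a byte FIFO that returns one fragment normally and two when a request wraps past the end of the region; the names and fields are invented for the example and leave out the driver's alignment, atomics, and page-list details.

#include <stdint.h>
#include <stdio.h>

struct demo_fifo {
	uint32_t size;		/* total bytes in the registered region */
	uint32_t head;		/* next free offset, wraps at size */
	uint32_t available;	/* bytes not owned by in-flight packets */
};

struct demo_iov {
	uint32_t offset;
	uint32_t len;
};

/* Allocate @bytes from the FIFO, split into two iovecs if the request wraps.
 * Returns the number of fragments used, or 0 if there is not enough space.
 */
static int demo_fifo_alloc(struct demo_fifo *f, uint32_t bytes,
			   struct demo_iov iov[2])
{
	uint32_t tail_room = f->size - f->head;
	int nfrags = 1;

	if (bytes > f->available)
		return 0;

	iov[0].offset = f->head;
	if (bytes <= tail_room) {
		iov[0].len = bytes;
	} else {
		iov[0].len = tail_room;
		iov[1].offset = 0;
		iov[1].len = bytes - tail_room;
		nfrags = 2;
	}
	f->head = (f->head + bytes) % f->size;
	f->available -= bytes;
	return nfrags;
}

/* Return space once the device has consumed the copied bytes. */
static void demo_fifo_free(struct demo_fifo *f, uint32_t bytes)
{
	f->available += bytes;
}

int main(void)
{
	struct demo_fifo f = { .size = 4096, .head = 4000, .available = 4096 };
	struct demo_iov iov[2] = { { 0, 0 }, { 0, 0 } };
	int n = demo_fifo_alloc(&f, 200, iov);	/* wraps: 96 + 104 bytes */

	printf("frags=%d [%u,%u] [%u,%u] avail=%u\n", n,
	       (unsigned)iov[0].offset, (unsigned)iov[0].len,
	       (unsigned)iov[1].offset, (unsigned)iov[1].len,
	       (unsigned)f.available);
	demo_fifo_free(&f, 200);
	return 0;
}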
135 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
140 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_free_ring() local
146 slots = tx->mask + 1; in gve_tx_free_ring()
147 gve_clean_tx_done(priv, tx, tx->req, false); in gve_tx_free_ring()
148 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_free_ring()
150 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring()
151 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring()
152 tx->q_resources = NULL; in gve_tx_free_ring()
154 if (!tx->raw_addressing) { in gve_tx_free_ring()
155 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_free_ring()
156 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_free_ring()
157 tx->tx_fifo.qpl = NULL; in gve_tx_free_ring()
160 bytes = sizeof(*tx->desc) * slots; in gve_tx_free_ring()
161 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_free_ring()
162 tx->desc = NULL; in gve_tx_free_ring()
164 vfree(tx->info); in gve_tx_free_ring()
165 tx->info = NULL; in gve_tx_free_ring()
167 netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); in gve_tx_free_ring()
172 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_alloc_ring() local
178 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring()
179 tx->q_num = idx; in gve_tx_alloc_ring()
181 tx->mask = slots - 1; in gve_tx_alloc_ring()
184 tx->info = vzalloc(sizeof(*tx->info) * slots); in gve_tx_alloc_ring()
185 if (!tx->info) in gve_tx_alloc_ring()
188 /* alloc tx queue */ in gve_tx_alloc_ring()
189 bytes = sizeof(*tx->desc) * slots; in gve_tx_alloc_ring()
190 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring()
191 if (!tx->desc) in gve_tx_alloc_ring()
194 tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; in gve_tx_alloc_ring()
195 tx->dev = &priv->pdev->dev; in gve_tx_alloc_ring()
196 if (!tx->raw_addressing) { in gve_tx_alloc_ring()
197 tx->tx_fifo.qpl = gve_assign_tx_qpl(priv); in gve_tx_alloc_ring()
198 if (!tx->tx_fifo.qpl) in gve_tx_alloc_ring()
200 /* map Tx FIFO */ in gve_tx_alloc_ring()
201 if (gve_tx_fifo_init(priv, &tx->tx_fifo)) in gve_tx_alloc_ring()
205 tx->q_resources = in gve_tx_alloc_ring()
207 sizeof(*tx->q_resources), in gve_tx_alloc_ring()
208 &tx->q_resources_bus, in gve_tx_alloc_ring()
210 if (!tx->q_resources) in gve_tx_alloc_ring()
213 netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx, in gve_tx_alloc_ring()
214 (unsigned long)tx->bus); in gve_tx_alloc_ring()
215 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_alloc_ring()
221 if (!tx->raw_addressing) in gve_tx_alloc_ring()
222 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_alloc_ring()
224 if (!tx->raw_addressing) in gve_tx_alloc_ring()
225 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_alloc_ring()
227 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_alloc_ring()
228 tx->desc = NULL; in gve_tx_alloc_ring()
230 vfree(tx->info); in gve_tx_alloc_ring()
231 tx->info = NULL; in gve_tx_alloc_ring()
244 "Failed to alloc tx ring=%d: err=%d\n", in gve_tx_alloc_rings()
268 * @tx: tx ring to check
274 static inline u32 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
276 return tx->mask + 1 - (tx->req - tx->done); in gve_tx_avail()
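The formula in gve_tx_avail() works because req and done are free-running 32-bit counters: their unsigned difference is the number of descriptors still in flight even across wraparound, and subtracting that from the ring size (mask + 1) leaves the free slots. A tiny standalone check of that arithmetic, with values made up for the example:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask = 255;		/* ring of 256 descriptors */
	uint32_t req = 0xfffffff0u;	/* producer count, close to wrapping */
	uint32_t done = 0xffffffe8u;	/* consumer count, 8 behind */
	uint32_t avail = mask + 1 - (req - done);

	assert(req - done == 8);	/* in-flight descriptors, wrap-safe */
	assert(avail == 248);		/* free slots left in the ring */
	return 0;
}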
279 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, in gve_skb_fifo_bytes_required() argument
289 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, in gve_skb_fifo_bytes_required()
321 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
325 if (!tx->raw_addressing) in gve_can_tx()
326 can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); in gve_can_tx()
328 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); in gve_can_tx()
332 static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb) in gve_maybe_stop_tx() argument
336 if (!tx->raw_addressing) in gve_maybe_stop_tx()
337 bytes_required = gve_skb_fifo_bytes_required(tx, skb); in gve_maybe_stop_tx()
339 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
343 tx->stop_queue++; in gve_maybe_stop_tx()
344 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx()
354 * if (tx queue stopped) in gve_maybe_stop_tx()
359 if (likely(!gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
362 netif_tx_start_queue(tx->netdev_txq); in gve_maybe_stop_tx()
363 tx->wake_queue++; in gve_maybe_stop_tx()
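The stop path above pairs with the wake check near the end of gve_clean_tx_done() further down: after stopping the queue it re-checks for space, because a completion could have freed descriptors between the availability check and the stop, and that completion would only wake a queue it already saw stopped. Below is a rough user-space model of the stop / re-check ordering with stub queue-state calls standing in for netif_tx_stop_queue()/netif_tx_start_queue(); it shows the control flow only, not the driver's locking or memory ordering.

#include <stdbool.h>
#include <stdio.h>

static bool queue_stopped;
static int free_descs;

/* Stand-ins for the netif queue-state helpers; illustrative only. */
static void stub_stop_queue(void)  { queue_stopped = true;  }
static void stub_start_queue(void) { queue_stopped = false; }

static bool can_tx(int needed)
{
	return free_descs >= needed;
}

/* Mirrors the stop / re-check pattern: stop first, then look again in case
 * room appeared in the window where a completion would not have woken us.
 */
static int maybe_stop_tx(int needed)
{
	if (can_tx(needed))
		return 0;			/* fast path: enough room */

	stub_stop_queue();

	if (!can_tx(needed))
		return -1;			/* genuinely full: report busy */

	stub_start_queue();			/* space appeared; undo the stop */
	return 0;
}

int main(void)
{
	free_descs = 16;
	printf("room: rc=%d stopped=%d\n", maybe_stop_tx(8), (int)queue_stopped);
	free_descs = 2;
	printf("full: rc=%d stopped=%d\n", maybe_stop_tx(8), (int)queue_stopped);
	return 0;
}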
418 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) in gve_tx_add_skb_copy() argument
424 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_copy()
430 info = &tx->info[idx]; in gve_tx_add_skb_copy()
431 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_copy()
446 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); in gve_tx_add_skb_copy()
447 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, in gve_tx_add_skb_copy()
450 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, in gve_tx_add_skb_copy()
458 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb_copy()
460 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
466 next_idx = (tx->req + 1 + i - payload_iov) & tx->mask; in gve_tx_add_skb_copy()
467 seg_desc = &tx->desc[next_idx]; in gve_tx_add_skb_copy()
474 tx->tx_fifo.base + info->iov[i].iov_offset, in gve_tx_add_skb_copy()
476 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
485 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy() argument
493 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_no_copy()
499 info = &tx->info[idx]; in gve_tx_add_skb_no_copy()
500 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
513 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
514 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
515 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
533 idx = (tx->req + 1) & tx->mask; in gve_tx_add_skb_no_copy()
534 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
544 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
545 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
547 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
548 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
549 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
552 buf = &tx->info[idx].buf; in gve_tx_add_skb_no_copy()
553 tx->info[idx].skb = NULL; in gve_tx_add_skb_no_copy()
566 gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); in gve_tx_add_skb_no_copy()
569 tx->dropped_pkt++; in gve_tx_add_skb_no_copy()
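When gve_tx_add_skb_no_copy() fails to map a fragment partway through, its error path walks back over the buffers it has already mapped and unmaps them before counting the drop. A minimal user-space sketch of that unwind-on-failure pattern, with stub map/unmap functions standing in for dma_map_single()/skb_frag_dma_map() and gve_tx_unmap_buf():

#include <stdbool.h>
#include <stdio.h>

#define NFRAGS 4

/* Stubs for mapping; the third fragment "fails" to show the unwind path. */
static bool stub_map(int i)   { return i != 2; }
static void stub_unmap(int i) { printf("unmapped fragment %d\n", i); }

/* Map every fragment, or unmap the ones already mapped and report failure. */
static int map_all_frags(void)
{
	int i;

	for (i = 0; i < NFRAGS; i++) {
		if (!stub_map(i))
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		stub_unmap(i);		/* walk back over what succeeded */
	return -1;
}

int main(void)
{
	printf("map_all_frags() = %d\n", map_all_frags());
	return 0;
}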
576 struct gve_tx_ring *tx; in gve_tx() local
581 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx()
582 if (unlikely(gve_maybe_stop_tx(tx, skb))) { in gve_tx()
583 /* We need to ring the txq doorbell -- we have stopped the Tx in gve_tx()
588 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
591 if (tx->raw_addressing) in gve_tx()
592 nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); in gve_tx()
594 nsegs = gve_tx_add_skb_copy(priv, tx, skb); in gve_tx()
598 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_tx()
600 tx->req += nsegs; in gve_tx()
605 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx()
611 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
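The netdev_xmit_more() test just above is what batches doorbell writes: as long as the stack promises more packets and the queue has not been stopped, gve_tx() returns without touching the doorbell and lets a later packet's write cover the whole batch. A small user-space model of that decision, counting how many doorbell writes a burst would cost (names invented for the illustration):

#include <stdbool.h>
#include <stdio.h>

static int doorbell_writes;

/* Ring only when this is the last packet of the burst (or the queue stopped),
 * mirroring the !netif_xmit_stopped() && netdev_xmit_more() early return.
 */
static void xmit_one(bool queue_stopped, bool more_coming)
{
	/* ...descriptors for the packet would be written here... */
	if (!queue_stopped && more_coming)
		return;			/* defer the doorbell to a later packet */
	doorbell_writes++;		/* one MMIO write covers the whole batch */
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		xmit_one(false, i != 7);	/* burst of 8, "more" until the last */
	printf("8 packets -> %d doorbell write(s)\n", doorbell_writes);
	return 0;
}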
617 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done() argument
628 idx = tx->done & tx->mask; in gve_clean_tx_done()
631 tx->q_num, __func__, idx, tx->req, tx->done); in gve_clean_tx_done()
632 info = &tx->info[idx]; in gve_clean_tx_done()
636 if (tx->raw_addressing) in gve_clean_tx_done()
637 gve_tx_unmap_buf(tx->dev, info); in gve_clean_tx_done()
638 tx->done++; in gve_clean_tx_done()
645 if (tx->raw_addressing) in gve_clean_tx_done()
656 if (!tx->raw_addressing) in gve_clean_tx_done()
657 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_tx_done()
658 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done()
659 tx->bytes_done += bytes; in gve_clean_tx_done()
660 tx->pkt_done += pkts; in gve_clean_tx_done()
661 u64_stats_update_end(&tx->statss); in gve_clean_tx_done()
662 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); in gve_clean_tx_done()
669 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && in gve_clean_tx_done()
670 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { in gve_clean_tx_done()
671 tx->wake_queue++; in gve_clean_tx_done()
672 netif_tx_wake_queue(tx->netdev_txq); in gve_clean_tx_done()
679 struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
681 u32 counter_index = be32_to_cpu((tx->q_resources->counter_index)); in gve_tx_load_event_counter()
689 struct gve_tx_ring *tx = block->tx; in gve_tx_poll() local
699 tx->last_nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_poll()
700 nic_done = be32_to_cpu(tx->last_nic_done); in gve_tx_poll()
705 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_tx_poll()
706 gve_clean_tx_done(priv, tx, to_do, true); in gve_tx_poll()
709 repoll |= (nic_done != tx->done); in gve_tx_poll()
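gve_tx_poll() cleans against a device-written event counter rather than per-descriptor completion bits: whatever the counter is ahead of tx->done is work to clean, capped by the poll budget, and the ring asks to be polled again while the two still differ. A rough standalone model of that budgeted catch-up loop, with counter values made up for the example:

#include <stdint.h>
#include <stdio.h>

/* Clean up to @budget completed descriptors against a device event counter. */
static uint32_t poll_once(uint32_t nic_done, uint32_t *done, uint32_t budget)
{
	uint32_t to_do = nic_done - *done;	/* free-running counters, wrap-safe */

	if (to_do > budget)
		to_do = budget;
	*done += to_do;				/* "clean" that many descriptors */
	return nic_done != *done;		/* nonzero: ask to be polled again */
}

int main(void)
{
	uint32_t done = 100, nic_done = 1000, budget = 256;

	while (poll_once(nic_done, &done, budget))
		printf("cleaned up to %u, repoll\n", (unsigned)done);
	printf("caught up at %u\n", (unsigned)done);
	return 0;
}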