Lines Matching refs:tx_ring
35 struct ice_ring *tx_ring; in ice_prgm_fdir_fltr() local
44 tx_ring = vsi->tx_rings[0]; in ice_prgm_fdir_fltr()
45 if (!tx_ring || !tx_ring->desc) in ice_prgm_fdir_fltr()
47 dev = tx_ring->dev; in ice_prgm_fdir_fltr()
50 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { in ice_prgm_fdir_fltr()
63 i = tx_ring->next_to_use; in ice_prgm_fdir_fltr()
64 first = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
65 f_desc = ICE_TX_FDIRDESC(tx_ring, i); in ice_prgm_fdir_fltr()
69 i = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
70 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_prgm_fdir_fltr()
71 tx_buf = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
74 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
98 writel(tx_ring->next_to_use, tx_ring->tail); in ice_prgm_fdir_fltr()
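
The ice_prgm_fdir_fltr() hits above show the Flow Director programming path taking vsi->tx_rings[0], spinning until ICE_DESC_UNUSED() reports at least two free slots, and then advancing next_to_use with the driver's usual wraparound ternary. Below is a minimal standalone C sketch of that unused-descriptor arithmetic and the wraparound advance; the toy_ring struct and toy_* helpers are hypothetical stand-ins, not the driver's ice_ring or ICE_DESC_UNUSED():

#include <stdio.h>

/* Hypothetical, simplified stand-in for the driver's Tx ring bookkeeping. */
struct toy_ring {
        unsigned short count;          /* number of descriptors in the ring */
        unsigned short next_to_use;    /* producer: next slot software fills */
        unsigned short next_to_clean;  /* consumer: next slot to complete */
};

/* Free slots left in the ring, mirroring the usual Intel-driver
 * DESC_UNUSED() arithmetic (one slot is always kept empty). */
static unsigned int toy_desc_unused(const struct toy_ring *r)
{
        return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
               r->next_to_clean - r->next_to_use - 1;
}

/* The wraparound idiom seen at lines 69 and 74: i++; i = (i < count) ? i : 0; */
static unsigned short toy_advance(unsigned short i, unsigned short count)
{
        i++;
        return (i < count) ? i : 0;
}

int main(void)
{
        struct toy_ring r = { .count = 8, .next_to_use = 6, .next_to_clean = 2 };

        printf("unused slots: %u\n", toy_desc_unused(&r));               /* 3 */
        printf("advance(7):   %u\n", (unsigned)toy_advance(7, r.count)); /* wraps to 0 */
        return 0;
}
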
145 void ice_clean_tx_ring(struct ice_ring *tx_ring) in ice_clean_tx_ring() argument
149 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
150 ice_xsk_clean_xdp_ring(tx_ring); in ice_clean_tx_ring()
155 if (!tx_ring->tx_buf) in ice_clean_tx_ring()
159 for (i = 0; i < tx_ring->count; i++) in ice_clean_tx_ring()
160 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); in ice_clean_tx_ring()
163 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); in ice_clean_tx_ring()
166 memset(tx_ring->desc, 0, tx_ring->size); in ice_clean_tx_ring()
168 tx_ring->next_to_use = 0; in ice_clean_tx_ring()
169 tx_ring->next_to_clean = 0; in ice_clean_tx_ring()
171 if (!tx_ring->netdev) in ice_clean_tx_ring()
175 netdev_tx_reset_queue(txring_txq(tx_ring)); in ice_clean_tx_ring()
184 void ice_free_tx_ring(struct ice_ring *tx_ring) in ice_free_tx_ring() argument
186 ice_clean_tx_ring(tx_ring); in ice_free_tx_ring()
187 devm_kfree(tx_ring->dev, tx_ring->tx_buf); in ice_free_tx_ring()
188 tx_ring->tx_buf = NULL; in ice_free_tx_ring()
190 if (tx_ring->desc) { in ice_free_tx_ring()
191 dmam_free_coherent(tx_ring->dev, tx_ring->size, in ice_free_tx_ring()
192 tx_ring->desc, tx_ring->dma); in ice_free_tx_ring()
193 tx_ring->desc = NULL; in ice_free_tx_ring()
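
Read together, the ice_clean_tx_ring() and ice_free_tx_ring() hits give the teardown order: release every outstanding buffer, zero the software buffer array and the descriptor area, rewind both ring indices, and only then free the buffer array and the descriptor memory itself. A simplified user-space model of that order is sketched below; the toy_* names are hypothetical, free() stands in for devm_kfree()/dmam_free_coherent(), and the DMA unmap, XDP and BQL details are elided:

#include <stdlib.h>
#include <string.h>

struct toy_tx_buf {
        void *skb;                     /* buffer owned by this slot, if any */
};

struct toy_ring {
        struct toy_tx_buf *tx_buf;     /* per-slot software state */
        void *desc;                    /* descriptor area (DMA memory in the driver) */
        size_t size;                   /* size of the descriptor area in bytes */
        unsigned short count;
        unsigned short next_to_use;
        unsigned short next_to_clean;
};

/* ice_clean_tx_ring(): drop every pending buffer, then zero both the
 * software array and the descriptor area and rewind the indices. */
static void toy_clean_tx_ring(struct toy_ring *r)
{
        if (!r->tx_buf)
                return;                /* ring already cleared */

        for (unsigned short i = 0; i < r->count; i++) {
                free(r->tx_buf[i].skb);
                r->tx_buf[i].skb = NULL;
        }

        memset(r->tx_buf, 0, sizeof(*r->tx_buf) * r->count);
        if (r->desc)
                memset(r->desc, 0, r->size);

        r->next_to_use = 0;
        r->next_to_clean = 0;
}

/* ice_free_tx_ring(): clean first, then release the buffer array and the
 * descriptor memory itself. */
static void toy_free_tx_ring(struct toy_ring *r)
{
        toy_clean_tx_ring(r);
        free(r->tx_buf);
        r->tx_buf = NULL;

        if (r->desc) {
                free(r->desc);
                r->desc = NULL;
        }
}
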
204 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) in ice_clean_tx_irq() argument
208 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_tx_irq()
209 s16 i = tx_ring->next_to_clean; in ice_clean_tx_irq()
213 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_tx_irq()
214 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_clean_tx_irq()
215 i -= tx_ring->count; in ice_clean_tx_irq()
228 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
241 if (ice_ring_is_xdp(tx_ring)) in ice_clean_tx_irq()
248 dma_unmap_single(tx_ring->dev, in ice_clean_tx_irq()
259 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
264 i -= tx_ring->count; in ice_clean_tx_irq()
265 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
266 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_tx_irq()
271 dma_unmap_page(tx_ring->dev, in ice_clean_tx_irq()
278 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf); in ice_clean_tx_irq()
285 i -= tx_ring->count; in ice_clean_tx_irq()
286 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
287 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_tx_irq()
296 i += tx_ring->count; in ice_clean_tx_irq()
297 tx_ring->next_to_clean = i; in ice_clean_tx_irq()
299 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); in ice_clean_tx_irq()
301 if (ice_ring_is_xdp(tx_ring)) in ice_clean_tx_irq()
304 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, in ice_clean_tx_irq()
308 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && in ice_clean_tx_irq()
309 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in ice_clean_tx_irq()
314 if (__netif_subqueue_stopped(tx_ring->netdev, in ice_clean_tx_irq()
315 tx_ring->q_index) && in ice_clean_tx_irq()
317 netif_wake_subqueue(tx_ring->netdev, in ice_clean_tx_irq()
318 tx_ring->q_index); in ice_clean_tx_irq()
319 ++tx_ring->tx_stats.restart_q; in ice_clean_tx_irq()
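
The ice_clean_tx_irq() hits show the indexing trick used by this completion loop: next_to_clean is loaded into a signed s16 and biased by subtracting count, so the wrap check inside the loop is simply "did the increment reach zero", at which point the index is re-biased and the tx_buf/tx_desc pointers are rewound to slot 0; the bias is added back before next_to_clean is stored (lines 296-297). A minimal sketch of just that control flow follows; the descriptor done checks, unmapping, stats and queue-wake logic are elided, and the toy_* names are hypothetical:

struct toy_ring {
        unsigned short count;
        unsigned short next_to_clean;
};

static void toy_clean_loop(struct toy_ring *r, int budget)
{
        short i = (short)r->next_to_clean;

        i -= r->count;                  /* bias: i now lives in [-count, -1] */

        while (budget--) {
                /* the slot being completed is (i + count); in the driver this
                 * is where the done/EOP checks, unmapping and stats happen,
                 * and the loop breaks early when no completed work remains */

                i++;
                if (!i) {               /* just finished slot count - 1 */
                        i -= r->count;  /* re-bias and, in the driver, rewind
                                         * tx_buf/tx_desc to slot 0 */
                }
        }

        i += r->count;                  /* drop the bias */
        r->next_to_clean = (unsigned short)i;
}
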
332 int ice_setup_tx_ring(struct ice_ring *tx_ring) in ice_setup_tx_ring() argument
334 struct device *dev = tx_ring->dev; in ice_setup_tx_ring()
340 WARN_ON(tx_ring->tx_buf); in ice_setup_tx_ring()
341 tx_ring->tx_buf = in ice_setup_tx_ring()
342 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count, in ice_setup_tx_ring()
344 if (!tx_ring->tx_buf) in ice_setup_tx_ring()
348 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_setup_tx_ring()
350 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, in ice_setup_tx_ring()
352 if (!tx_ring->desc) { in ice_setup_tx_ring()
354 tx_ring->size); in ice_setup_tx_ring()
358 tx_ring->next_to_use = 0; in ice_setup_tx_ring()
359 tx_ring->next_to_clean = 0; in ice_setup_tx_ring()
360 tx_ring->tx_stats.prev_pkt = -1; in ice_setup_tx_ring()
364 devm_kfree(dev, tx_ring->tx_buf); in ice_setup_tx_ring()
365 tx_ring->tx_buf = NULL; in ice_setup_tx_ring()
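
ice_setup_tx_ring() allocates in two steps: a zeroed tx_buf array sized count * sizeof(*tx_buf), then a descriptor area of count * sizeof(struct ice_tx_desc) rounded up to an alignment boundary (the alignment argument sits on the next source line and is not shown above; a 4096-byte page is assumed in the sketch), after which both indices are rewound; if the descriptor allocation fails, the buffer array is freed again. The sketch below mirrors that shape in user space: toy_* names are hypothetical, the 16-byte toy_desc only stands in for the real descriptor, and calloc()/aligned_alloc() replace devm_kzalloc()/dmam_alloc_coherent():

#include <stdlib.h>

#define TOY_ALIGN(x, a) (((x) + (size_t)(a) - 1) & ~((size_t)(a) - 1))

struct toy_desc {                      /* 16-byte stand-in for a Tx descriptor */
        unsigned long long qw0;
        unsigned long long qw1;
};

struct toy_tx_buf {
        void *skb;
};

struct toy_ring {
        struct toy_tx_buf *tx_buf;
        struct toy_desc *desc;
        size_t size;
        unsigned short count;
        unsigned short next_to_use;
        unsigned short next_to_clean;
};

static int toy_setup_tx_ring(struct toy_ring *r)
{
        /* zeroed per-slot software state (devm_kzalloc() in the driver) */
        r->tx_buf = calloc(r->count, sizeof(*r->tx_buf));
        if (!r->tx_buf)
                return -1;

        /* descriptor area, rounded up to the assumed 4096-byte alignment
         * (dmam_alloc_coherent() in the driver) */
        r->size = TOY_ALIGN(r->count * sizeof(struct toy_desc), 4096);
        r->desc = aligned_alloc(4096, r->size);
        if (!r->desc)
                goto err;

        r->next_to_use = 0;
        r->next_to_clean = 0;
        return 0;

err:
        free(r->tx_buf);
        r->tx_buf = NULL;
        return -1;                      /* -ENOMEM in the driver */
}
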
1465 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) in __ice_maybe_stop_tx() argument
1467 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); in __ice_maybe_stop_tx()
1472 if (likely(ICE_DESC_UNUSED(tx_ring) < size)) in __ice_maybe_stop_tx()
1476 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); in __ice_maybe_stop_tx()
1477 ++tx_ring->tx_stats.restart_q; in __ice_maybe_stop_tx()
1488 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) in ice_maybe_stop_tx() argument
1490 if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) in ice_maybe_stop_tx()
1493 return __ice_maybe_stop_tx(tx_ring, size); in ice_maybe_stop_tx()
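
Together, __ice_maybe_stop_tx() and ice_maybe_stop_tx() implement the standard two-stage Tx flow-control check: the fast path returns immediately while enough descriptors are free, and the slow path stops the queue before re-checking, with a memory barrier between the two, so a completion running on another CPU cannot free space in the window between the test and the stop without the queue either staying stopped or being restarted here. The condensed sketch below folds both stages into one function; the toy_* names are hypothetical, an atomic flag stands in for the netif subqueue state, and the barrier is only indicated by a comment:

#include <stdatomic.h>
#include <stdbool.h>

struct toy_ring {
        unsigned short count;
        unsigned short next_to_use;
        unsigned short next_to_clean;
        atomic_bool stopped;            /* stands in for the netif subqueue state */
};

static unsigned int toy_desc_unused(const struct toy_ring *r)
{
        return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
               r->next_to_clean - r->next_to_use - 1;
}

static int toy_maybe_stop_tx(struct toy_ring *r, unsigned int needed)
{
        if (toy_desc_unused(r) >= needed)
                return 0;                       /* fast path: enough room */

        atomic_store(&r->stopped, true);        /* netif_stop_subqueue() */
        /* smp_mb() in the driver: order the stop against the re-check */

        if (toy_desc_unused(r) < needed)
                return -1;                      /* still full: stay stopped (-EBUSY) */

        atomic_store(&r->stopped, false);       /* a completion freed room meanwhile;
                                                 * restart and count restart_q */
        return 0;
}
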
1507 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, in ice_tx_map() argument
1511 u16 i = tx_ring->next_to_use; in ice_tx_map()
1527 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_tx_map()
1535 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ice_tx_map()
1542 if (dma_mapping_error(tx_ring->dev, dma)) in ice_tx_map()
1564 if (i == tx_ring->count) { in ice_tx_map()
1565 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_tx_map()
1585 if (i == tx_ring->count) { in ice_tx_map()
1586 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_tx_map()
1593 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ice_tx_map()
1596 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1600 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in ice_tx_map()
1606 if (i == tx_ring->count) in ice_tx_map()
1625 tx_ring->next_to_use = i; in ice_tx_map()
1627 ice_maybe_stop_tx(tx_ring, DESC_NEEDED); in ice_tx_map()
1630 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) in ice_tx_map()
1631 writel(i, tx_ring->tail); in ice_tx_map()
1638 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1639 ice_unmap_and_free_tx_buf(tx_ring, tx_buf); in ice_tx_map()
1643 i = tx_ring->count; in ice_tx_map()
1647 tx_ring->next_to_use = i; in ice_tx_map()
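
At the end of ice_tx_map() the new producer index is written to next_to_use, ice_maybe_stop_tx() is re-armed with DESC_NEEDED, and the tail register is only touched when the queue has been stopped or netdev_xmit_more() says no further frames are coming, which batches MMIO doorbells across a burst of packets; the remaining hits (1638-1647) are the dma_error unwind that walks back over the already-mapped buffers. A tiny sketch of the publish-and-doorbell decision, with hypothetical toy_* names and printf() standing in for writel():

#include <stdbool.h>
#include <stdio.h>

struct toy_ring {
        unsigned short next_to_use;
};

/* stands in for writel(i, tx_ring->tail): the doorbell telling the NIC that
 * descriptors before slot i are ready to fetch */
static void toy_write_tail(struct toy_ring *r, unsigned short i)
{
        (void)r;
        printf("doorbell: tail <- %u\n", (unsigned)i);
}

static void toy_publish(struct toy_ring *r, unsigned short i,
                        bool queue_stopped, bool xmit_more)
{
        r->next_to_use = i;

        /* defer the doorbell while the stack promises more frames, unless the
         * queue just got stopped and nothing else will flush it soon */
        if (queue_stopped || !xmit_more)
                toy_write_tail(r, i);
}
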
1843 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) in ice_tx_prepare_vlan_flags() argument
1859 ice_tx_prepare_vlan_flags_dcb(tx_ring, first); in ice_tx_prepare_vlan_flags()
2149 ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb, in ice_tstamp() argument
2158 if (!tx_ring->ptp_tx) in ice_tstamp()
2166 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); in ice_tstamp()
2184 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) in ice_xmit_frame_ring() argument
2187 struct ice_vsi *vsi = tx_ring->vsi; in ice_xmit_frame_ring()
2193 ice_trace(xmit_frame_ring, tx_ring, skb); in ice_xmit_frame_ring()
2200 tx_ring->tx_stats.tx_linearize++; in ice_xmit_frame_ring()
2209 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + in ice_xmit_frame_ring()
2211 tx_ring->tx_stats.tx_busy++; in ice_xmit_frame_ring()
2215 offload.tx_ring = tx_ring; in ice_xmit_frame_ring()
2218 first = &tx_ring->tx_buf[tx_ring->next_to_use]; in ice_xmit_frame_ring()
2225 ice_tx_prepare_vlan_flags(tx_ring, first); in ice_xmit_frame_ring()
2247 ice_tstamp(tx_ring, skb, first, &offload); in ice_xmit_frame_ring()
2251 u16 i = tx_ring->next_to_use; in ice_xmit_frame_ring()
2254 cdesc = ICE_TX_CTX_DESC(tx_ring, i); in ice_xmit_frame_ring()
2256 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_xmit_frame_ring()
2265 ice_tx_map(tx_ring, first, &offload); in ice_xmit_frame_ring()
2269 ice_trace(xmit_frame_ring_drop, tx_ring, skb); in ice_xmit_frame_ring()
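
When ice_xmit_frame_ring() needs a context descriptor for TSO, checksum or timestamp offloads, the hits at 2251-2256 show it claiming the slot at next_to_use and advancing next_to_use with the same wraparound ternary before ice_tx_map() writes the data descriptors. A one-function sketch of that reservation, with a hypothetical toy_ring in place of the driver's ring:

struct toy_ring {
        unsigned short count;
        unsigned short next_to_use;
};

/* Claim the current next_to_use slot for a context descriptor and advance
 * the producer index past it, wrapping at the end of the ring. */
static unsigned short toy_reserve_ctx_slot(struct toy_ring *r)
{
        unsigned short slot = r->next_to_use;
        unsigned short i = (unsigned short)(slot + 1);

        r->next_to_use = (i < r->count) ? i : 0;
        return slot;
}
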
2285 struct ice_ring *tx_ring; in ice_start_xmit() local
2287 tx_ring = vsi->tx_rings[skb->queue_mapping]; in ice_start_xmit()
2295 return ice_xmit_frame_ring(skb, tx_ring); in ice_start_xmit()
2302 void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring) in ice_clean_ctrl_tx_irq() argument
2304 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_ctrl_tx_irq()
2305 s16 i = tx_ring->next_to_clean; in ice_clean_ctrl_tx_irq()
2310 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_ctrl_tx_irq()
2311 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_clean_ctrl_tx_irq()
2312 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2339 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2340 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2341 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_ctrl_tx_irq()
2346 dma_unmap_single(tx_ring->dev, in ice_clean_ctrl_tx_irq()
2351 devm_kfree(tx_ring->dev, tx_buf->raw_buf); in ice_clean_ctrl_tx_irq()
2366 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2367 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2368 tx_desc = ICE_TX_DESC(tx_ring, 0); in ice_clean_ctrl_tx_irq()
2374 i += tx_ring->count; in ice_clean_ctrl_tx_irq()
2375 tx_ring->next_to_clean = i; in ice_clean_ctrl_tx_irq()