Lines Matching refs:tx_ring
57 void iavf_clean_tx_ring(struct iavf_ring *tx_ring) in iavf_clean_tx_ring() argument
63 if (!tx_ring->tx_bi) in iavf_clean_tx_ring()
67 for (i = 0; i < tx_ring->count; i++) in iavf_clean_tx_ring()
68 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in iavf_clean_tx_ring()
70 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; in iavf_clean_tx_ring()
71 memset(tx_ring->tx_bi, 0, bi_size); in iavf_clean_tx_ring()
74 memset(tx_ring->desc, 0, tx_ring->size); in iavf_clean_tx_ring()
76 tx_ring->next_to_use = 0; in iavf_clean_tx_ring()
77 tx_ring->next_to_clean = 0; in iavf_clean_tx_ring()
79 if (!tx_ring->netdev) in iavf_clean_tx_ring()
83 netdev_tx_reset_queue(txring_txq(tx_ring)); in iavf_clean_tx_ring()
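The matches above span the whole body of iavf_clean_tx_ring(): every tx_bi entry is unmapped and freed, the software buffer-info array and the descriptor memory are zeroed, the ring indices are reset, and the byte-queue-limit state for the netdev queue is cleared. A condensed sketch of how those lines fit together; the local declarations, comments, and early returns are filled in as assumptions:

void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* nothing to do if the buffer-info array was never allocated */
	if (!tx_ring->tx_bi)
		return;

	/* unmap DMA and free any skb still attached to each entry */
	for (i = 0; i < tx_ring->count; i++)
		iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	/* wipe the software buffer-info array and the descriptor memory */
	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* reset byte-queue-limit accounting for this queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}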
92 void iavf_free_tx_resources(struct iavf_ring *tx_ring) in iavf_free_tx_resources() argument
94 iavf_clean_tx_ring(tx_ring); in iavf_free_tx_resources()
95 kfree(tx_ring->tx_bi); in iavf_free_tx_resources()
96 tx_ring->tx_bi = NULL; in iavf_free_tx_resources()
98 if (tx_ring->desc) { in iavf_free_tx_resources()
99 dma_free_coherent(tx_ring->dev, tx_ring->size, in iavf_free_tx_resources()
100 tx_ring->desc, tx_ring->dma); in iavf_free_tx_resources()
101 tx_ring->desc = NULL; in iavf_free_tx_resources()
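iavf_free_tx_resources() is the teardown counterpart: clean the ring first, then release the buffer-info array and the coherent descriptor memory. A short sketch assembled from the matches, with little added beyond the braces:

void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
	iavf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}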
136 struct iavf_ring *tx_ring = NULL; in iavf_detect_recover_hung() local
155 tx_ring = &vsi->back->tx_rings[i]; in iavf_detect_recover_hung()
156 if (tx_ring && tx_ring->desc) { in iavf_detect_recover_hung()
164 packets = tx_ring->stats.packets & INT_MAX; in iavf_detect_recover_hung()
165 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { in iavf_detect_recover_hung()
166 iavf_force_wb(vsi, tx_ring->q_vector); in iavf_detect_recover_hung()
174 tx_ring->tx_stats.prev_pkt_ctr = in iavf_detect_recover_hung()
175 iavf_get_tx_pending(tx_ring, true) ? packets : -1; in iavf_detect_recover_hung()
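The iavf_detect_recover_hung() matches show the per-ring stall check: if a ring's packet counter has not advanced since the previous pass, the driver forces a descriptor write-back on that ring's vector rather than escalating to a reset, and prev_pkt_ctr is only re-armed while descriptors are still pending. A sketch of the loop around lines 155-175; the loop bound (num_active_queues), the continue, and the omission of the barrier before iavf_get_tx_pending() are assumptions:

	for (i = 0; i < vsi->back->num_active_queues; i++) {
		tx_ring = &vsi->back->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* an unchanged packet count with work still queued
			 * suggests a stalled write-back on this ring
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				iavf_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* re-arm only while descriptors are outstanding;
			 * -1 disarms the detector for an idle ring
			 */
			tx_ring->tx_stats.prev_pkt_ctr =
				iavf_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}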
191 struct iavf_ring *tx_ring, int napi_budget) in iavf_clean_tx_irq() argument
193 int i = tx_ring->next_to_clean; in iavf_clean_tx_irq()
199 tx_buf = &tx_ring->tx_bi[i]; in iavf_clean_tx_irq()
200 tx_desc = IAVF_TX_DESC(tx_ring, i); in iavf_clean_tx_irq()
201 i -= tx_ring->count; in iavf_clean_tx_irq()
213 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in iavf_clean_tx_irq()
230 dma_unmap_single(tx_ring->dev, in iavf_clean_tx_irq()
242 tx_ring, tx_desc, tx_buf); in iavf_clean_tx_irq()
248 i -= tx_ring->count; in iavf_clean_tx_irq()
249 tx_buf = tx_ring->tx_bi; in iavf_clean_tx_irq()
250 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_clean_tx_irq()
255 dma_unmap_page(tx_ring->dev, in iavf_clean_tx_irq()
268 i -= tx_ring->count; in iavf_clean_tx_irq()
269 tx_buf = tx_ring->tx_bi; in iavf_clean_tx_irq()
270 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_clean_tx_irq()
279 i += tx_ring->count; in iavf_clean_tx_irq()
280 tx_ring->next_to_clean = i; in iavf_clean_tx_irq()
281 u64_stats_update_begin(&tx_ring->syncp); in iavf_clean_tx_irq()
282 tx_ring->stats.bytes += total_bytes; in iavf_clean_tx_irq()
283 tx_ring->stats.packets += total_packets; in iavf_clean_tx_irq()
284 u64_stats_update_end(&tx_ring->syncp); in iavf_clean_tx_irq()
285 tx_ring->q_vector->tx.total_bytes += total_bytes; in iavf_clean_tx_irq()
286 tx_ring->q_vector->tx.total_packets += total_packets; in iavf_clean_tx_irq()
288 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) { in iavf_clean_tx_irq()
294 unsigned int j = iavf_get_tx_pending(tx_ring, false); in iavf_clean_tx_irq()
299 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) in iavf_clean_tx_irq()
300 tx_ring->arm_wb = true; in iavf_clean_tx_irq()
304 netdev_tx_completed_queue(txring_txq(tx_ring), in iavf_clean_tx_irq()
308 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in iavf_clean_tx_irq()
309 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in iavf_clean_tx_irq()
314 if (__netif_subqueue_stopped(tx_ring->netdev, in iavf_clean_tx_irq()
315 tx_ring->queue_index) && in iavf_clean_tx_irq()
317 netif_wake_subqueue(tx_ring->netdev, in iavf_clean_tx_irq()
318 tx_ring->queue_index); in iavf_clean_tx_irq()
319 ++tx_ring->tx_stats.restart_queue; in iavf_clean_tx_irq()
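The iavf_clean_tx_irq() matches trace the completion path: the loop walks from next_to_clean, unmaps the head buffer with dma_unmap_single() and the fragments with dma_unmap_page(), publishes totals under u64_stats, reports them to BQL via netdev_tx_completed_queue(), and wakes the subqueue once IAVF_DESC_UNUSED() rises past TX_WAKE_THRESHOLD. The less obvious detail is the index handling: i is biased by -count up front so the wrap test is a cheap !i rather than a compare against count on every step. A small self-contained demo of that idiom (userspace C, names invented for illustration):

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	int next_to_clean = 5;		/* arbitrary starting slot */
	int budget = 6;			/* descriptors to walk this pass */
	int i = next_to_clean;

	i -= RING_COUNT;		/* bias the index negative */

	while (budget--) {
		/* the real slot is i + RING_COUNT while i is negative */
		printf("cleaning slot %d\n", i + RING_COUNT);

		i++;
		if (!i)			/* walked past the last slot: wrap */
			i -= RING_COUNT;
	}

	i += RING_COUNT;		/* un-bias before storing */
	next_to_clean = i;
	printf("next_to_clean = %d\n", next_to_clean);
	return 0;
}

Run against an 8-entry ring starting at slot 5, this cleans slots 5, 6, 7, 0, 1, 2 and leaves next_to_clean at 3, matching the i += tx_ring->count / tx_ring->next_to_clean = i pair at lines 279-280.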
614 int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring) in iavf_setup_tx_descriptors() argument
616 struct device *dev = tx_ring->dev; in iavf_setup_tx_descriptors()
623 WARN_ON(tx_ring->tx_bi); in iavf_setup_tx_descriptors()
624 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; in iavf_setup_tx_descriptors()
625 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in iavf_setup_tx_descriptors()
626 if (!tx_ring->tx_bi) in iavf_setup_tx_descriptors()
630 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc); in iavf_setup_tx_descriptors()
631 tx_ring->size = ALIGN(tx_ring->size, 4096); in iavf_setup_tx_descriptors()
632 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in iavf_setup_tx_descriptors()
633 &tx_ring->dma, GFP_KERNEL); in iavf_setup_tx_descriptors()
634 if (!tx_ring->desc) { in iavf_setup_tx_descriptors()
636 tx_ring->size); in iavf_setup_tx_descriptors()
640 tx_ring->next_to_use = 0; in iavf_setup_tx_descriptors()
641 tx_ring->next_to_clean = 0; in iavf_setup_tx_descriptors()
642 tx_ring->tx_stats.prev_pkt_ctr = -1; in iavf_setup_tx_descriptors()
646 kfree(tx_ring->tx_bi); in iavf_setup_tx_descriptors()
647 tx_ring->tx_bi = NULL; in iavf_setup_tx_descriptors()
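iavf_setup_tx_descriptors() mirrors the free path: allocate the per-descriptor software state, round the descriptor area up to 4 KiB, allocate it DMA-coherent, and initialise the indices and the hang detector. A condensed sketch; the error label, return codes, and the dev_info() message text are assumptions:

int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* one iavf_tx_buffer of software state per descriptor */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* descriptor memory, rounded up to a 4 KiB multiple */
	tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;	/* hang detector disarmed */
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}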
1798 struct iavf_ring *tx_ring, in iavf_tx_prepare_vlan_flags() argument
1805 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in iavf_tx_prepare_vlan_flags()
1958 struct iavf_ring *tx_ring, in iavf_tx_enable_csum() argument
2120 static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, in iavf_create_tx_ctx() argument
2125 int i = tx_ring->next_to_use; in iavf_create_tx_ctx()
2132 context_desc = IAVF_TX_CTXTDESC(tx_ring, i); in iavf_create_tx_ctx()
2135 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in iavf_create_tx_ctx()
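The iavf_create_tx_ctx() matches only show the slot handling, but the pattern is worth spelling out: the context descriptor goes into the slot at next_to_use, and the index then advances with a simple compare-and-wrap. A fragment reconstructed from the matched lines; the context_desc declaration is assumed and the field writes that follow are not shown in the listing:

	struct iavf_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	/* take the slot at next_to_use for the context descriptor */
	context_desc = IAVF_TX_CTXTDESC(tx_ring, i);

	/* advance, wrapping back to slot 0 at the end of the ring */
	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;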
2235 int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) in __iavf_maybe_stop_tx() argument
2237 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __iavf_maybe_stop_tx()
2242 if (likely(IAVF_DESC_UNUSED(tx_ring) < size)) in __iavf_maybe_stop_tx()
2246 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __iavf_maybe_stop_tx()
2247 ++tx_ring->tx_stats.restart_queue; in __iavf_maybe_stop_tx()
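__iavf_maybe_stop_tx() uses the classic stop-then-recheck pattern to avoid racing with the cleaning path: stop the subqueue, re-test the free-descriptor count, and restart immediately if the cleaner freed space in the meantime. A sketch with the memory barrier and return values filled in as assumptions:

int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* order the stop against the re-read of the free count */
	smp_mb();

	/* another CPU may have cleaned descriptors since the caller checked */
	if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* space appeared after all: restart and count the bounce */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}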
2261 static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, in iavf_tx_map() argument
2270 u16 i = tx_ring->next_to_use; in iavf_tx_map()
2282 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in iavf_tx_map()
2284 tx_desc = IAVF_TX_DESC(tx_ring, i); in iavf_tx_map()
2290 if (dma_mapping_error(tx_ring->dev, dma)) in iavf_tx_map()
2309 if (i == tx_ring->count) { in iavf_tx_map()
2310 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_tx_map()
2330 if (i == tx_ring->count) { in iavf_tx_map()
2331 tx_desc = IAVF_TX_DESC(tx_ring, 0); in iavf_tx_map()
2338 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in iavf_tx_map()
2341 tx_bi = &tx_ring->tx_bi[i]; in iavf_tx_map()
2344 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in iavf_tx_map()
2347 if (i == tx_ring->count) in iavf_tx_map()
2350 tx_ring->next_to_use = i; in iavf_tx_map()
2352 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED); in iavf_tx_map()
2373 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in iavf_tx_map()
2374 writel(i, tx_ring->tail); in iavf_tx_map()
2380 dev_info(tx_ring->dev, "TX DMA map failed\n"); in iavf_tx_map()
2384 tx_bi = &tx_ring->tx_bi[i]; in iavf_tx_map()
2385 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi); in iavf_tx_map()
2389 i = tx_ring->count; in iavf_tx_map()
2393 tx_ring->next_to_use = i; in iavf_tx_map()
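The iavf_tx_map() matches cover both the happy path (map the head with dma_map_single(), walk the fragments with skb_frag_dma_map(), wrap the index at count, account the bytes to BQL, advance next_to_use, ring the doorbell) and the DMA-error unwind. A condensed sketch of the tail of the function; the descriptor finalisation and write barrier that sit between these steps are omitted, and the unwind loop body is filled in as an assumption from the matched lines:

	/* account bytes to BQL before making the work visible */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* only hit the doorbell when the queue was just stopped or no
	 * further frames are queued behind this one
	 */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* walk backwards from the failing slot, unmapping and freeing
	 * everything already set up for this skb
	 */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;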
2404 struct iavf_ring *tx_ring) in iavf_xmit_frame_ring() argument
2419 iavf_trace(xmit_frame_ring, skb, tx_ring); in iavf_xmit_frame_ring()
2428 tx_ring->tx_stats.tx_linearize++; in iavf_xmit_frame_ring()
2437 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) { in iavf_xmit_frame_ring()
2438 tx_ring->tx_stats.tx_busy++; in iavf_xmit_frame_ring()
2443 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in iavf_xmit_frame_ring()
2449 if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in iavf_xmit_frame_ring()
2470 tx_ring, &cd_tunneling); in iavf_xmit_frame_ring()
2477 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in iavf_xmit_frame_ring()
2480 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in iavf_xmit_frame_ring()
2486 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring); in iavf_xmit_frame_ring()
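iavf_xmit_frame_ring() stitches the pieces together in a fixed order: linearize overly fragmented skbs (counting tx_linearize), reserve descriptors up front, record the first tx_bi, resolve VLAN flags, TSO, and checksum offload, emit an optional context descriptor, and finally call iavf_tx_map(); failures along the way are traced as xmit_frame_ring_drop and the skb is dropped. The reservation at line 2437 asks for the data descriptors plus a fixed margin; a sketch of that check, with the return value and the reading of the extra 4 + 1 (a small gap plus one slot for a context descriptor) as assumptions:

	/* reserve the data descriptors for this skb plus headroom:
	 * a small gap and one slot for a possible context descriptor
	 */
	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}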
2502 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; in iavf_xmit_frame() local
2514 return iavf_xmit_frame_ring(skb, tx_ring); in iavf_xmit_frame()
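The ndo_start_xmit entry point itself is thin: it resolves skb->queue_mapping to the matching Tx ring and hands off to iavf_xmit_frame_ring(). A sketch with the undersized-frame padding that the real function performs reduced to a comment:

netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

	/* (the driver also pads frames shorter than the hardware minimum
	 * before handing them off)
	 */

	return iavf_xmit_frame_ring(skb, tx_ring);
}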